import json
import logging
import os
import sys
from time import time
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, require_torch_tpu


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_results(output_dir):
    """Load the metrics a run wrote to `all_results.json` in its output directory."""
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
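
# Illustrative note (not part of the original test): a successful run leaves a
# JSON file such as {"eval_accuracy": 0.83, "train_runtime": 412.7} in the
# output directory, so get_results(tmp_dir)["eval_accuracy"] is what the
# assertion below inspects. The exact keys depend on the example script.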
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
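
# Typical entry point (illustrative, not part of this __init__): construct an
# `Accelerator()` and wrap your objects with
# `model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)`
# so they are placed on the right device(s) automatically.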
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Prints a maximum set of activities that can be done by a single person,
    one at a time, assuming `finish` is sorted in ascending order.

    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
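
# Worked trace for the demo data below (added for clarity): starting from
# activity 0 (finish=2), activity 1 starts at 3 >= 2, activity 3 starts at
# 5 >= 4, and activity 4 starts at 8 >= 7, so "0,1,3,4," is printed.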
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
import collections
import importlib.util
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the objects defined in
    `_import_structure` and in the `TYPE_CHECKING` block. Returns None if the
    init is a traditional one with no `_import_structure`.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
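
# Shape of the return value (illustrative): two dicts keyed by backend name,
# e.g. {"none": ["AutoConfig", ...], "torch": ["BertModel", ...]} -- the first
# describing `_import_structure`, the second the `TYPE_CHECKING` block.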
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between the `_import_structure` objects and the
    `TYPE_CHECKING` objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo and raise an error if the two halves are inconsistent."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Check all submodules of transformers are registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
import os
import tempfile
import unittest
import uuid
from pathlib import Path

from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available  # (sic: upstream name)


if is_torch_available():
    import torch

if is_soundfile_availble():
    import soundfile as sf

if is_vision_available():
    from PIL import Image


def get_new_path(suffix: str = "") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
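
# Illustrative only: get_new_path(".wav") yields something like
# "/tmp/tmpXXXXXXXX/<random-uuid>.wav" -- a fresh temp directory per call,
# so tests never collide on file names.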
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)


@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))


class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
"""
Project Euler Problem 107: find the maximum saving achieved by replacing a
network (given in p107_network.txt) with its minimum spanning tree.
"""
from __future__ import annotations

import os
from collections.abc import Mapping

EdgeT = tuple[int, int]


class Graph:
    """A class representing an undirected weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight

            subgraph.add_edge(min_edge, min_weight)

        return subgraph
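
# A minimal illustration (not part of the original solution): for the triangle
# graph below, Prim's algorithm keeps the edges of weight 1 and 2 and drops the
# weight-3 edge, so the saving is (1 + 2 + 3) - (1 + 2) = 3.
#
#   tiny = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#   mst = tiny.prims_algorithm()
#   assert sum(tiny.edges.values()) - sum(mst.edges.values()) == 3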
def solution(filename: str = "p107_network.txt") -> int:
    """
    Find the maximum saving achievable by replacing the network given in
    `filename` with its minimum spanning tree.
    """
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjaceny_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])

    graph = Graph(set(range(len(adjaceny_matrix))), edges)

    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
import numpy as np
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        # Black out any image flagged as NSFW
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        # Black out any image flagged as watermarked
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
"""
Project Euler Problem 75: count the values of L <= limit for which exactly one
integer-sided right triangle has perimeter L. Primitive triples are generated
with Euclid's formula and their multiples are tallied.
"""
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            # Tally this primitive triple and all of its multiples
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
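
# Worked example of Euclid's formula (added for clarity): (m, n) = (2, 1) with
# m > n, opposite parity and gcd(m, n) = 1 gives the primitive triple
# (m^2 - n^2, 2mn, m^2 + n^2) = (3, 4, 5), whose perimeter 2m(m + n) = 12 is
# exactly what the loop above tallies (together with 24, 36, ... up to limit).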
if __name__ == "__main__":
    print(f"{solution() = }")
"""Convert UperNet + Swin checkpoints from mmsegmentation to the Hugging Face format."""
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
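
# Dimensions for the "tiny" variant (illustrative): stage 0 has dim = 96, so
# the fused qkv.weight is (288, 96) and the three slices above are each
# (96, 96); the bias splits the same way along its single axis.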
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
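
# Why [0, 2, 1, 3] (added note, based on the code above): the Swin
# patch-merging layer concatenates the four 2x2 sub-patches, and the
# mmsegmentation and Hugging Face implementations concatenate them in a
# different order; swapping the middle two quadrant groups converts the
# channel layout of the reduction/norm weights between the two conventions.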
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
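
# Minimal usage sketch (illustrative; assumes the public
# "microsoft/layoutxlm-base" checkpoint and a PIL document image):
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(image, return_tensors="pt")  # OCR runs in the image processor
#   # encoding contains input_ids, bbox, attention_mask and the "image" pixel values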
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when an unavailable formatter type (or one of its aliases) is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """
    Factory function to get a Formatter given its type name and keyword arguments.
    A formatter is an object that extracts and formats data from pyarrow tables.
    It defines the formatting for rows, columns and batches.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None)}, but got '{format_type}'"
        )
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
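
    # Note (added for clarity): `np.moveaxis(x, 0, -1)` turns the random
    # channels-first array of shape (3, 30, 400) into the (30, 400, 3)
    # channels-last layout PIL expects, so each test image is a 400x30 RGB image.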
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
import datasets


_CITATION = """\
@InProceedings{conneau2018xnli,
  author = "Conneau, Alexis
        and Rinott, Ruty
        and Lample, Guillaume
        and Williams, Adina
        and Bowman, Samuel R.
        and Schwenk, Holger
        and Stoyanov, Veselin",
  title = "XNLI: Evaluating Cross-lingual Sentence Representations",
  booktitle = "Proceedings of the 2018 Conference on Empirical Methods
               in Natural Language Processing",
  year = "2018",
  publisher = "Association for Computational Linguistics",
  location = "Brussels, Belgium",
}
"""

_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""

_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
    predictions: Predicted labels.
    references: Ground truth labels.
Returns:
    'accuracy': accuracy
Examples:

    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> xnli_metric = datasets.load_metric("xnli")
    >>> results = xnli_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
"""


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
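
# Illustrative only: with numpy arrays (the metric is declared with
# format="numpy" below), simple_accuracy(np.array([0, 1, 1]),
# np.array([0, 1, 0])) evaluates (preds == labels).mean() == 2/3.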
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """
    Given the numerical coefficients a, b and c,
    calculates the roots for any quadratic equation of the form ax^2 + bx + c.

    >>> quadratic_roots(a=1, b=3, c=-4)
    (1.0, -4.0)
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")

    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
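
# Worked example (added for clarity): for 5x^2 + 6x + 1 = 0 the discriminant is
# b^2 - 4ac = 36 - 20 = 16, so the roots are (-6 +/- 4) / 10, i.e. -0.2 and
# -1.0 -- exactly what main() prints below.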
def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()
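
        # Note (added for clarity): the patched `requests.Session.request`
        # always returns the 500 response built above, so the second
        # `from_pretrained` call can only succeed by falling back to the files
        # cached by the first call -- which is exactly what this test verifies.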
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class lowercase__ ( unittest.TestCase ):
@classmethod
def UpperCAmelCase ( cls )-> List[str]:
'''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def UpperCAmelCase ( cls )-> Union[str, Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__UpperCAmelCase , repo_id="test-feature-extractor" , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__UpperCAmelCase , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=__UpperCAmelCase , use_auth_token=self._token )
lowerCAmelCase__ = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) )
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            F"{USER}/test-dynamic-feature-extractor" , trust_remote_code=True )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
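# Hedged, self-contained sketch of the offline-error mocking pattern used in the
# first test above (assumes only `requests` and the standard library):
import unittest.mock as demo_mock

import requests

with demo_mock.patch("requests.Session.request") as fake_request:
    fake_request.return_value = demo_mock.Mock(status_code=500)
    demo_response = requests.Session().request("GET", "https://example.com")
print(demo_response.status_code)  # 500 -- served by the mock, no network involved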
| 340 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """simple docstring"""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be an exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10).")
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            # controlled phase rotation between qubit j and the current target
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
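# Hedged classical aside: on n qubits the QFT acts as the 2**n-point DFT matrix,
# which is unitary. A numpy check for n = 2 (no qiskit required):
import numpy as np

demo_n = 2
demo_N = 2 ** demo_n
omega = np.exp(2j * np.pi / demo_N)
dft = np.array([[omega ** (j * k) for k in range(demo_N)] for j in range(demo_N)]) / np.sqrt(demo_N)
print(np.allclose(dft @ dft.conj().T, np.eye(demo_N)))  # True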
| 340 | 1 |
a_ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 340 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = """ViTImageProcessor"""
    tokenizer_class = """MgpstrTokenizer"""

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        '''simple docstring'''
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        '''simple docstring'''
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # char eos token
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2  # bpe eos token
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102  # wp eos token
            eos_str = "[SEP]"
        else:
            raise ValueError(F"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        '''simple docstring'''
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        '''simple docstring'''
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        '''simple docstring'''
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
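# Hedged, self-contained sketch of the "keep the highest-confidence decoder"
# selection performed in batch_decode above:
demo_scores = [0.91, 0.87, 0.95]               # e.g. char / bpe / wp confidences
demo_strings = ["hello", "he llo", "hello"]
demo_best = demo_scores.index(max(demo_scores))
print(demo_strings[demo_best], demo_scores[demo_best])  # hello 0.95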
| 340 | 1 |
from manim import *
class lowercase__ ( _UpperCAmelCase ):
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase__ = [mem.copy() for i in range(6 )]
lowerCAmelCase__ = [mem.copy() for i in range(6 )]
lowerCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ = Text("CPU" , font_size=24 )
lowerCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ = [mem.copy() for i in range(4 )]
lowerCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ = Text("GPU" , font_size=24 )
lowerCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ = [mem.copy() for i in range(6 )]
lowerCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ = Text("Model" , font_size=24 )
lowerCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
lowerCAmelCase__ = []
for i, rect in enumerate(__UpperCAmelCase ):
rect.set_stroke(__UpperCAmelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
lowerCAmelCase__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
self.add(__UpperCAmelCase )
cpu_targs.append(__UpperCAmelCase )
lowerCAmelCase__ = [mem.copy() for i in range(6 )]
lowerCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
lowerCAmelCase__ = Text("Loaded Checkpoint" , font_size=24 )
lowerCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , aligned_edge=__UpperCAmelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
lowerCAmelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase__ = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
lowerCAmelCase__ = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase ) , Write(__UpperCAmelCase ) )
self.play(Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i, rect in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
target.move_to(__UpperCAmelCase )
first_animations.append(GrowFromCenter(__UpperCAmelCase , run_time=1 ) )
lowerCAmelCase__ = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
self.play(*__UpperCAmelCase )
self.play(*__UpperCAmelCase )
self.wait()
| 340 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
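# Hedged, minimal sketch of the lazy-import idea behind _LazyModule above:
# attribute access triggers the real import, so importing the package stays cheap.
import importlib
import types


class LazyModuleDemo(types.ModuleType):
    def __init__(self, name, target):
        super().__init__(name)
        self._target = target

    def __getattr__(self, attr):
        # only runs for attributes not found normally, i.e. on first use
        return getattr(importlib.import_module(self._target), attr)


lazy_json = LazyModuleDemo("lazy_json", "json")
print(lazy_json.dumps({"ok": True}))  # the target module is imported only here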
| 340 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class lowercase__ ( _UpperCAmelCase ):
a_ =field(default="""image-classification""", metadata={"""include_in_asdict_even_if_is_default""": True} )
a_ =Features({"""image""": Image()} )
a_ =Features({"""labels""": ClassLabel} )
a_ ="image"
a_ ="labels"
def UpperCAmelCase ( self , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , __UpperCAmelCase ):
raise ValueError(F"Column {self.label_column} is not a ClassLabel." )
lowerCAmelCase__ = copy.deepcopy(self )
lowerCAmelCase__ = self.label_schema.copy()
lowerCAmelCase__ = features[self.label_column]
lowerCAmelCase__ = label_schema
return task_template
@property
def UpperCAmelCase ( self )-> Dict[str, str]:
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
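# Hedged, self-contained sketch of the copy-then-swap-schema step above
# (plain dicts standing in for `datasets` Features):
import copy

demo_template = {"label_schema": {"labels": None}}
demo_aligned = copy.deepcopy(demo_template)
demo_aligned["label_schema"]["labels"] = ["cat", "dog"]  # swap in the real feature
print(demo_template["label_schema"], demo_aligned["label_schema"])  # original untouched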
| 340 |
from collections import defaultdict
def dfs(start: int) -> int:
    """simple docstring"""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # a subtree of even size can be split off by cutting its parent edge
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """simple docstring"""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    # the whole tree lands in `cuts` too but has no parent edge, hence the -1;
    # for this sample the answer is 2
    print(len(cuts) - 1)
| 340 | 1 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
a_ = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def get_diffusers_versions():
    """simple docstring"""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url ).read() )["releases"].keys()
    return sorted(releases , key=lambda x: version.Version(x ) )
def init_hf_modules():
    """simple docstring"""
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE , exist_ok=True )
    init_path = Path(HF_MODULES_CACHE ) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike] ):
    """simple docstring"""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path , exist_ok=True )
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file ):
    """simple docstring"""
    with open(module_file , "r" , encoding="utf-8" ) as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$" , content , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import" , content , flags=re.MULTILINE )
    # Unique-ify
    return list(set(relative_imports ) )
def get_relative_import_files(module_file ):
    """simple docstring"""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f ) )
        module_path = Path(module_file ).parent
        new_import_files = [str(module_path / m ) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [F"{f}.py" for f in new_import_files]
        no_change = len(new_import_files ) == 0
        all_relative_imports.extend(files_to_check )
    return all_relative_imports
def check_imports(filename ):
    """simple docstring"""
    with open(filename , "r" , encoding="utf-8" ) as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$" , content , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import" , content , flags=re.MULTILINE )
    # Only keep the top-level module
    imports = [imp.split("." )[0] for imp in imports if not imp.startswith("." )]
    # Unique-ify and test we got them all
    imports = list(set(imports ) )
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
    if len(missing_packages ) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            F"{', '.join(missing_packages )}. Run `pip install {' '.join(missing_packages )}`" )
    return get_relative_imports(filename )
def get_class_in_module(class_name , module_path ):
    """simple docstring"""
    module_path = module_path.replace(os.path.sep , "." )
    module = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module , class_name )
def find_pipeline_class(loaded_module ):
    """simple docstring"""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module , inspect.isclass ) )
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , DiffusionPipeline )
            and cls.__module__.split("." )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    F" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    F" {loaded_module}." )
            pipeline_class = cls
    return pipeline_class
def _a ( UpperCamelCase_ : Union[str, os.PathLike] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Union[str, os.PathLike]] = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[Dict[str, str]] = None , UpperCamelCase_ : Optional[Union[bool, str]] = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : bool = False , ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ = str(UpperCamelCase_ )
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
if os.path.isfile(UpperCamelCase_ ):
lowerCAmelCase__ = module_file_or_url
lowerCAmelCase__ = "local"
elif pretrained_model_name_or_path.count("/" ) == 0:
lowerCAmelCase__ = get_diffusers_versions()
# cut ".dev0"
lowerCAmelCase__ = "v" + ".".join(__version__.split("." )[:3] )
# retrieve github version that matches
if revision is None:
lowerCAmelCase__ = latest_version if latest_version[1:] in available_versions else "main"
logger.info(F"Defaulting to latest_version: {revision}." )
elif revision in available_versions:
lowerCAmelCase__ = F"v{revision}"
elif revision == "main":
lowerCAmelCase__ = revision
else:
raise ValueError(
F"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
F" {', '.join(available_versions + ['main'] )}." )
# community pipeline on GitHub
lowerCAmelCase__ = COMMUNITY_PIPELINES_URL.format(revision=UpperCamelCase_ , pipeline=UpperCamelCase_ )
try:
lowerCAmelCase__ = cached_download(
UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , proxies=UpperCamelCase_ , resume_download=UpperCamelCase_ , local_files_only=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , )
lowerCAmelCase__ = "git"
lowerCAmelCase__ = pretrained_model_name_or_path + ".py"
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
else:
try:
# Load from URL or cache if already cached
lowerCAmelCase__ = hf_hub_download(
UpperCamelCase_ , UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , proxies=UpperCamelCase_ , resume_download=UpperCamelCase_ , local_files_only=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , )
lowerCAmelCase__ = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) )
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
# Check we have all the requirements in our environment
lowerCAmelCase__ = check_imports(UpperCamelCase_ )
# Now we move the module inside our cached dynamic modules.
lowerCAmelCase__ = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(UpperCamelCase_ )
lowerCAmelCase__ = Path(UpperCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(UpperCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
lowerCAmelCase__ = F"{module_needed}.py"
shutil.copy(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase__ = use_auth_token
elif use_auth_token is True:
lowerCAmelCase__ = HfFolder.get_token()
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = model_info(UpperCamelCase_ , revision=UpperCamelCase_ , token=UpperCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
lowerCAmelCase__ = submodule_path / commit_hash
lowerCAmelCase__ = full_submodule + os.path.sep + commit_hash
create_dynamic_module(UpperCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(UpperCamelCase_ , submodule_path / module_file )
        # Make sure we also have every file with relative imports
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
UpperCamelCase_ , F"{module_needed}.py" , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , resume_download=UpperCamelCase_ , proxies=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , local_files_only=UpperCamelCase_ , )
return os.path.join(UpperCamelCase_ , UpperCamelCase_ )
def _a ( UpperCamelCase_ : Union[str, os.PathLike] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[Union[str, os.PathLike]] = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[Dict[str, str]] = None , UpperCamelCase_ : Optional[Union[bool, str]] = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : bool = False , **UpperCamelCase_ : Optional[int] , ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = get_cached_module_file(
UpperCamelCase_ , UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , resume_download=UpperCamelCase_ , proxies=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , local_files_only=UpperCamelCase_ , )
return get_class_in_module(UpperCamelCase_ , final_module.replace(".py" , "" ) )
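# Hedged mini-demo of the import-scanning regexes above (raw strings, no file I/O):
import re

demo_src = "import .utils\nfrom .pipeline import Foo\nimport torch\n"
demo_rel = re.findall(r"^\s*import\s+\.(\S+)\s*$", demo_src, flags=re.MULTILINE)
demo_rel += re.findall(r"^\s*from\s+\.(\S+)\s+import", demo_src, flags=re.MULTILINE)
print(sorted(set(demo_rel)))  # ['pipeline', 'utils'] -- `torch` is not relative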
| 340 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """simple docstring"""
    url = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # Yahoo renders the quote inside a div carrying this atomic-CSS class name
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 1 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowercase__ :
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowerCAmelCase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCAmelCase__ = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0_001 , beta_end=0.02 , thresholding=__UpperCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowerCAmelCase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowerCAmelCase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCAmelCase__ = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0_001 , beta_end=0.02 , thresholding=__UpperCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowerCAmelCase__ = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0_001 , beta_end=0.02 , )
torch.manual_seed(0 )
lowerCAmelCase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = self.pipeline_class(**__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ = self.get_dummy_inputs(__UpperCAmelCase )
lowerCAmelCase__ = inputs["prompt"]
lowerCAmelCase__ = inputs["generator"]
lowerCAmelCase__ = inputs["num_inference_steps"]
lowerCAmelCase__ = inputs["output_type"]
if "image" in inputs:
lowerCAmelCase__ = inputs["image"]
else:
lowerCAmelCase__ = None
if "mask_image" in inputs:
lowerCAmelCase__ = inputs["mask_image"]
else:
lowerCAmelCase__ = None
if "original_image" in inputs:
lowerCAmelCase__ = inputs["original_image"]
else:
lowerCAmelCase__ = None
lowerCAmelCase__ , lowerCAmelCase__ = pipe.encode_prompt(__UpperCAmelCase )
# inputs with prompt converted to embeddings
lowerCAmelCase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowerCAmelCase__ = image
if mask_image is not None:
lowerCAmelCase__ = mask_image
if original_image is not None:
lowerCAmelCase__ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = pipe(**__UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = self.pipeline_class.from_pretrained(__UpperCAmelCase )
pipe_loaded.to(__UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=__UpperCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__UpperCAmelCase , __UpperCAmelCase ) is None , F"`{optional_component}` did not stay set to None after loading." , )
lowerCAmelCase__ = self.get_dummy_inputs(__UpperCAmelCase )
lowerCAmelCase__ = inputs["generator"]
lowerCAmelCase__ = inputs["num_inference_steps"]
lowerCAmelCase__ = inputs["output_type"]
# inputs with prompt converted to embeddings
lowerCAmelCase__ = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowerCAmelCase__ = image
if mask_image is not None:
lowerCAmelCase__ = mask_image
if original_image is not None:
lowerCAmelCase__ = original_image
lowerCAmelCase__ = pipe_loaded(**__UpperCAmelCase )[0]
lowerCAmelCase__ = np.abs(to_np(__UpperCAmelCase ) - to_np(__UpperCAmelCase ) ).max()
self.assertLess(__UpperCAmelCase , 1E-4 )
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = self.pipeline_class(**__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ = self.get_dummy_inputs(__UpperCAmelCase )
lowerCAmelCase__ = pipe(**__UpperCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = self.pipeline_class.from_pretrained(__UpperCAmelCase )
pipe_loaded.to(__UpperCAmelCase )
pipe_loaded.set_progress_bar_config(disable=__UpperCAmelCase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowerCAmelCase__ = self.get_dummy_inputs(__UpperCAmelCase )
lowerCAmelCase__ = pipe_loaded(**__UpperCAmelCase )[0]
lowerCAmelCase__ = np.abs(to_np(__UpperCAmelCase ) - to_np(__UpperCAmelCase ) ).max()
self.assertLess(__UpperCAmelCase , 1E-4 )
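# Hedged, self-contained sketch of the save/reload round-trip check used above:
import tempfile

import numpy as np

demo_before = np.array([1.0, 2.0, 3.0])
with tempfile.TemporaryDirectory() as demo_dir:
    np.save(f"{demo_dir}/arr.npy", demo_before)
    demo_after = np.load(f"{demo_dir}/arr.npy")
print(np.abs(demo_before - demo_after).max() < 1e-4)  # True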
| 340 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        '''simple docstring'''
        self.head: Node | None = None
        # inserting in descending order keeps the list sorted ascending
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        '''simple docstring'''
        return sum(1 for _ in self)

    def __str__(self) -> str:
        '''simple docstring'''
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """simple docstring"""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
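# Hedged quick check (assumes the classes above): merging two sorted lists
# yields one sorted list.
if __name__ == "__main__":
    demo_merged = merge_lists(SortedLinkedList([3, 1]), SortedLinkedList([0, 2]))
    print(demo_merged)       # 0 -> 1 -> 2 -> 3
    print(len(demo_merged))  # 4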
| 340 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class lowercase__ ( _UpperCAmelCase ):
a_ ="""xlnet"""
a_ =["""mems"""]
a_ ={
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __UpperCAmelCase=32000 , __UpperCAmelCase=1024 , __UpperCAmelCase=24 , __UpperCAmelCase=16 , __UpperCAmelCase=4096 , __UpperCAmelCase="gelu" , __UpperCAmelCase=True , __UpperCAmelCase="bi" , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=-1 , __UpperCAmelCase=False , __UpperCAmelCase="last" , __UpperCAmelCase=True , __UpperCAmelCase="tanh" , __UpperCAmelCase=0.1 , __UpperCAmelCase=5 , __UpperCAmelCase=5 , __UpperCAmelCase=5 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , )-> int:
'''simple docstring'''
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = d_model
lowerCAmelCase__ = n_layer
lowerCAmelCase__ = n_head
if d_model % n_head != 0:
raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
lowerCAmelCase__ = d_model // n_head
lowerCAmelCase__ = ff_activation
lowerCAmelCase__ = d_inner
lowerCAmelCase__ = untie_r
lowerCAmelCase__ = attn_type
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = dropout
lowerCAmelCase__ = mem_len
lowerCAmelCase__ = reuse_len
lowerCAmelCase__ = bi_data
lowerCAmelCase__ = clamp_len
lowerCAmelCase__ = same_length
lowerCAmelCase__ = summary_type
lowerCAmelCase__ = summary_use_proj
lowerCAmelCase__ = summary_activation
lowerCAmelCase__ = summary_last_dropout
lowerCAmelCase__ = start_n_top
lowerCAmelCase__ = end_n_top
lowerCAmelCase__ = bos_token_id
lowerCAmelCase__ = pad_token_id
lowerCAmelCase__ = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs["use_cache"]
lowerCAmelCase__ = use_mems_eval
lowerCAmelCase__ = use_mems_train
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
def UpperCAmelCase ( self , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
raise NotImplementedError(
F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 340 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
    '''n_samples''': 64,
    '''horizon''': 32,
    '''num_inference_steps''': 20,
    '''n_guide_steps''': 2,  # can set to 0 for faster sampling, does not use value network
    '''scale_grad_by_std''': True,
    '''scale''': 0.1,
    '''eta''': 0.0,
    '''t_grad_cutoff''': 2,
    '''device''': '''cpu''',
}
if __name__ == "__main__":
    env_name = '''hopper-medium-v2'''
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        '''bglick13/hopper-medium-v2-value-function-hor32''',
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                F" {total_score}"
            )
            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
    except KeyboardInterrupt:
        pass
    print(F"Total reward: {total_reward}")
| 340 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase ):
a_ =StableDiffusionInpaintPipeline
a_ =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
a_ =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a_ =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a_ =frozenset([] )
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , )
lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
torch.manual_seed(0 )
lowerCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
lowerCAmelCase__ = CLIPTextModel(__UpperCAmelCase )
lowerCAmelCase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCAmelCase__ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ = Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("RGB" ).resize((64, 64) )
lowerCAmelCase__ = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(__UpperCAmelCase ).startswith("mps" ):
lowerCAmelCase__ = torch.manual_seed(__UpperCAmelCase )
else:
lowerCAmelCase__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
lowerCAmelCase__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = StableDiffusionInpaintPipeline(**__UpperCAmelCase )
lowerCAmelCase__ = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ = self.get_dummy_inputs(__UpperCAmelCase )
lowerCAmelCase__ = sd_pipe(**__UpperCAmelCase ).images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowerCAmelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowerCAmelCase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
lowerCAmelCase__ = "stabilityai/stable-diffusion-2-inpainting"
lowerCAmelCase__ = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
lowerCAmelCase__ = "Face of a yellow cat, high resolution, sitting on a park bench"
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , )
lowerCAmelCase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowerCAmelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowerCAmelCase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
lowerCAmelCase__ = "stabilityai/stable-diffusion-2-inpainting"
lowerCAmelCase__ = StableDiffusionInpaintPipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
lowerCAmelCase__ = "Face of a yellow cat, high resolution, sitting on a park bench"
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , )
lowerCAmelCase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowerCAmelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowerCAmelCase__ = "stabilityai/stable-diffusion-2-inpainting"
lowerCAmelCase__ = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" )
lowerCAmelCase__ = StableDiffusionInpaintPipeline.from_pretrained(
__UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase__ = "Face of a yellow cat, high resolution, sitting on a park bench"
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="np" , )
lowerCAmelCase__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
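# Hedged aside on what inpainting computes: keep unmasked pixels, regenerate the
# masked ones. A one-line numpy version of that blend:
import numpy as np

demo_image = np.zeros((4, 4, 3))
demo_generated = np.ones((4, 4, 3))
demo_mask = np.zeros((4, 4, 1))
demo_mask[1:3, 1:3] = 1.0
demo_blended = demo_image * (1 - demo_mask) + demo_generated * demo_mask
print(demo_blended[..., 0])  # ones only inside the masked 2x2 square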
| 340 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
TASK_GUIDES_PATH = '''docs/source/en/tasks'''


def _find_text_in_file(filename, start_prompt, end_prompt):
    """simple docstring"""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    """simple docstring"""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """simple docstring"""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(TASK_GUIDES_PATH, task_guide), start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->", end_prompt="<!--End of the generated tip-->", )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(TASK_GUIDES_PATH, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 340 | 1 |
import os
from math import log10
def _a ( UpperCamelCase_ : str = "base_exp.txt" ) -> int:
"""simple docstring"""
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , UpperCamelCase_ ) ) ):
        a , x = list(map(int , line.split("," ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
return result
if __name__ == "__main__":
print(solution())
| 340 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
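# TOKENIZER_CLASSES pairs every convertible slow tokenizer name with its fast (Rust-backed) class.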
def convert_slow_checkpoint_to_fast ( tokenizer_name , checkpoint_name , dump_path , force_download ) -> None:
    """simple docstring"""
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + "Fast" )}
    logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
        for checkpoint in checkpoint_names:
            logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split("/" )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F"=> File names {file_names}" )
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    os.remove(file_name )
                    logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
a_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 340 | 1 |
from __future__ import annotations
import math
def is_prime ( number : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
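# Project Euler 46 (Goldbach's other conjecture): pre-compute the odd composite
# numbers below 100001 that the search below tries to write as prime + 2*i*i.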
odd_composites = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def compute_nums ( n : int ) -> list[int]:
    """simple docstring"""
    if not isinstance(n , int ):
        raise ValueError("n must be an integer" )
    if n <= 0:
        raise ValueError("n must be >= 0" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
            if len(list_nums ) == n:
                return list_nums
return []
def solution ( ) -> int:
    """simple docstring"""
    return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"{solution() = }")
| 340 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
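# Pre-computes per-example sequence lengths and pickles them next to the dataset
# (e.g. invoked as `python save_len_file.py t5-small path/to/data` via the fire CLI
# at the bottom — an illustrative call, not from this file), so length-grouped
# samplers can later be built without re-tokenizing the corpus.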
def save_len_file ( tokenizer_name , data_dir , max_source_length=1_024 , max_target_length=1_024 , consider_target=False , **kwargs ) -> None:
    """simple docstring"""
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="train" , **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["labels"].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="val" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 340 | 1 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {'''facebook/bart-base''': BartForConditionalGeneration}
tokenizer_dict = {'''facebook/bart-base''': BartTokenizer}
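# Only facebook/bart-base is wired up; extend these two lookup tables to export other checkpoints.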
def parse_args ( ) -> argparse.Namespace:
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
    parser.add_argument(
        "--validation_file" , type=str , default=None , help="A csv or a json file containing the validation data." )
    parser.add_argument(
        "--max_length" , type=int , default=5 , help="The maximum total input sequence length after tokenization." , )
    parser.add_argument(
        "--num_beams" , type=int , default=None , help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ) , )
    parser.add_argument(
        "--model_name_or_path" , type=str , help="Path to pretrained model or model identifier from huggingface.co/models." , required=True , )
    parser.add_argument(
        "--config_name" , type=str , default=None , help="Pretrained config name or path if not the same as model_name" , )
    parser.add_argument(
        "--device" , type=str , default="cpu" , help="Device where the model will be run" , )
    parser.add_argument("--output_file_path" , type=str , default=None , help="Where to store the final ONNX file." )
    args = parser.parse_args()
    return args
def _a ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any]="cpu" ) -> int:
"""simple docstring"""
lowerCAmelCase__ = model_dict[model_name].from_pretrained(UpperCamelCase_ ).to(UpperCamelCase_ )
lowerCAmelCase__ = tokenizer_dict[model_name].from_pretrained(UpperCamelCase_ )
if model_name in ["facebook/bart-base"]:
lowerCAmelCase__ = 0
lowerCAmelCase__ = None
lowerCAmelCase__ = 0
return huggingface_model, tokenizer
def export_and_validate_model ( model , tokenizer , onnx_file_path , num_beams , max_length ) -> None:
    """simple docstring"""
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors="pt" ).to(model.device )
        summary_ids = model.generate(
            inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=num_beams , max_length=max_length , early_stopping=True , decoder_start_token_id=model.config.decoder_start_token_id , )
        torch.onnx.export(
            bart_script_model , (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) , onnx_file_path , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            } , example_outputs=summary_ids , )
        logger.info("Model exported to {}".format(onnx_file_path ) )
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None , {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams ),
                "max_length": np.array(max_length ),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
            } , )
        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
        logger.info("Model outputs from torch and ONNX Runtime are similar." )
        logger.info("Success." )
def main ( ) -> None:
    """simple docstring"""
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device )
    model , tokenizer = load_model_tokenizer(args.model_name_or_path , device )
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
    model.to(device )
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX" )
    export_and_validate_model(model , tokenizer , output_name , num_beams , max_length )
if __name__ == "__main__":
main()
| 340 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
a_ = {
    '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
    '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
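# `attribute_map` below translates the generic config attribute names used across the
# library (hidden_size, num_attention_heads, ...) into XLNet's native hyperparameter names.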
class XLNetConfig ( PretrainedConfig ):
    model_type ="""xlnet"""
    keys_to_ignore_at_inference =["""mems"""]
    attribute_map ={
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=32000 , d_model=1024 , n_layer=24 , n_head=16 , d_inner=4096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-1_2 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , )-> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead." , FutureWarning , )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings ( self )-> int:
'''simple docstring'''
logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
    def max_position_embeddings ( self , value )-> None:
'''simple docstring'''
raise NotImplementedError(
F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 340 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
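# Tests that carry neither an `integration` nor a `unit` marker default to unit tests.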
def pytest_collection_modifyitems ( config , items ) -> None:
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"] ):
continue
item.add_marker(pytest.mark.unit )
def pytest_configure ( config ) -> None:
"""simple docstring"""
config.addinivalue_line("markers" , "torchaudio_latest: mark test to run with torchaudio>=0.12" )
@pytest.fixture(autouse=True )
def set_test_cache_config ( tmp_path_factory , monkeypatch ) -> None:
"""simple docstring"""
lowerCAmelCase__ = tmp_path_factory.getbasetemp() / "cache"
lowerCAmelCase__ = test_hf_cache_home / "datasets"
lowerCAmelCase__ = test_hf_cache_home / "metrics"
lowerCAmelCase__ = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE" , str(UpperCamelCase_ ) )
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE" , str(UpperCamelCase_ ) )
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE" , str(UpperCamelCase_ ) )
lowerCAmelCase__ = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH" , str(UpperCamelCase_ ) )
lowerCAmelCase__ = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(UpperCamelCase_ ) )
@pytest.fixture(autouse=UpperCamelCase_ , scope="session" )
def _a ( ) -> List[str]:
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=True )
def set_update_download_counts_to_false ( monkeypatch ) -> None:
    """simple docstring"""
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS" , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning ( monkeypatch ) -> None:
    """simple docstring"""
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING" , True )
| 340 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
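# The helpers below map the original MSN checkpoint's parameter names onto the Hugging Face ViT layout.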
def create_rename_keys ( config , base_model=False ) -> list:
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v ( state_dict , config , base_model=False ) -> None:
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_ ( state_dict ) -> None:
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head ( state_dict ) -> None:
    """simple docstring"""
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key ( dct , old , new ) -> None:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint ( checkpoint_url , pytorch_dump_folder_path ) -> None:
    """simple docstring"""
    config = ViTMSNConfig()
    config.num_labels = 1_000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
    else:
        expected_slice = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1e-4 )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 340 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
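# Reader: builds a Dataset from JSON files; Writer: streams a Dataset back out as (optionally compressed) JSON Lines.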
class JsonDatasetReader ( AbstractDatasetReader ):
'''simple docstring'''
    def __init__( self , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , field : Optional[str] = None , num_proc : Optional[int] = None , **kwargs , ) ->None:
        """simple docstring"""
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )
    def read ( self ):
        """simple docstring"""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class JsonDatasetWriter :
'''simple docstring'''
    def __init__( self , dataset : Dataset , path_or_buf : Union[PathLike, BinaryIO] , batch_size : Optional[int] = None , num_proc : Optional[int] = None , **to_json_kwargs , ) ->None:
        """simple docstring"""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = '''utf-8'''
        self.to_json_kwargs = to_json_kwargs
    def write ( self ) ->int:
        """simple docstring"""
        _ = self.to_json_kwargs.pop('''path_or_buf''' , None )
        orient = self.to_json_kwargs.pop('''orient''' , '''records''' )
        lines = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
        index = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
        compression = self.to_json_kwargs.pop('''compression''' , None )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , '''wb''' , compression=compression ) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    ''' was passed. Please provide a local path instead.''' )
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        return written
    def _batch_json ( self , args ) ->bytes:
        """simple docstring"""
        offset , orient , lines , index , to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs )
        if not json_str.endswith('''\n''' ):
            json_str += "\n"
        return json_str.encode(self.encoding )
    def _write ( self , file_obj : BinaryIO , orient , lines , index , **to_json_kwargs , ) ->int:
        """simple docstring"""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(json_str )
        else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                    written += file_obj.write(json_str )
        return written
| 0 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a_ = logging.get_logger(__name__)
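# Scores an image against free-form candidate labels with a CLIP-style model: each label is
# slotted into the hypothesis template and ranked by the model's logits_per_image.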
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline ( Pipeline ):
def __init__( self , **__UpperCAmelCase )-> List[str]:
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , images , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters ( self , **kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="This is a photo of {}." )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = load_image(__UpperCAmelCase )
lowerCAmelCase__ = self.image_processor(images=[image] , return_tensors=self.framework )
lowerCAmelCase__ = candidate_labels
lowerCAmelCase__ = [hypothesis_template.format(__UpperCAmelCase ) for x in candidate_labels]
lowerCAmelCase__ = self.tokenizer(__UpperCAmelCase , return_tensors=self.framework , padding=__UpperCAmelCase )
lowerCAmelCase__ = [text_inputs]
return inputs
    def _forward ( self , model_inputs ):
        '''simple docstring'''
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess ( self , model_outputs ):
        '''simple docstring'''
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
]
return result
| 340 | 0 |
'''simple docstring'''
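# Luhn check: double every second digit from the right, fold any two-digit result back
# to a single digit, and require the final sum to be divisible by 10.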
def validate_initial_digits ( credit_card_number : str ) -> bool:
    '''simple docstring'''
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def luhn_validation ( credit_card_number : str ) -> bool:
    '''simple docstring'''
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
def validate_credit_card_number ( credit_card_number : str ) -> bool:
    '''simple docstring'''
    error_message = f"""{credit_card_number} is an invalid credit card number because"""
    if not credit_card_number.isdigit():
        print(f"""{error_message} it has nonnumerical characters.""" )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(f"""{error_message} of its length.""" )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(f"""{error_message} of its first two digits.""" )
        return False
    if not luhn_validation(credit_card_number ):
        print(f"""{error_message} it fails the Luhn check.""" )
        return False
    print(f"""{credit_card_number} is a valid credit card number.""" )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 1 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
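# A tiny SentencePiece BPE model shipped with the test fixtures keeps this test suite hermetic.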
class BartphoTokenizerTest ( TokenizerTesterMixin, unittest.TestCase ):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
super().setUp()
lowerCAmelCase__ = ["▁This", "▁is", "▁a", "▁t", "est"]
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = {"unk_token": "<unk>"}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(F"{token} {vocab_tokens[token]}\n" )
lowerCAmelCase__ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = "This is a là test"
lowerCAmelCase__ = "This is a<unk><unk> test"
return input_text, output_text
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 340 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
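# The tester below fabricates tiny random configs and inputs so the TF graph tests stay fast.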
class TFViTModelTester :
'''simple docstring'''
    def __init__(self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs (self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config (self ):
        '''simple docstring'''
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model (self , config , pixel_values , labels ):
        '''simple docstring'''
        model = TFViTModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
    def create_and_check_for_image_classification (self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common (self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp (self ):
        '''simple docstring'''
        self.model_tester = TFViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config (self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def test_inputs_embeds (self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def test_graph_mode_with_inputs_embeds (self ):
        '''simple docstring'''
        pass
    def test_model_common_attributes (self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )
    def test_forward_signature (self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained (self ):
        '''simple docstring'''
        model = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
        self.assertIsNotNone(model )
def prepare_img ():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest (unittest.TestCase ):
    '''simple docstring'''
    @cached_property
    def default_image_processor (self ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head (self ):
        '''simple docstring'''
        model = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.27_44, 0.82_15, -0.08_36] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
| 2 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
a_ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
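# Checkpoint-name -> remote file maps consumed by `from_pretrained` for the canonical DistilBERT checkpoints.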
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
        '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
        '''distilbert-base-uncased-distilled-squad''': (
            '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
        ),
        '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
        '''distilbert-base-cased-distilled-squad''': (
            '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
        ),
        '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
        '''distilbert-base-multilingual-cased''': (
            '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
        '''distilbert-base-uncased-distilled-squad''': (
            '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
        ),
        '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
        '''distilbert-base-cased-distilled-squad''': (
            '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
        ),
        '''distilbert-base-german-cased''': (
            '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
        ),
        '''distilbert-base-multilingual-cased''': (
            '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
        ),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , )-> None:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens ( self , token_ids_0 , token_ids_1=None )-> List[int]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences ( self , token_ids_0 , token_ids_1 = None )-> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary ( self , save_directory , filename_prefix = None )-> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 340 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
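# Lazy-import scaffolding: submodules are only materialized when an attribute is first accessed.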
_import_structure = {
    'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_graphormer'] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 |
__version__ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 340 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
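# The keys listed below sit at the top level of the fairseq checkpoint rather than inside the
# `w2v_model.` encoder namespace handled by MAPPING above.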
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively ( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowerCAmelCase = value
elif weight_type == "weight_g":
lowerCAmelCase = value
elif weight_type == "weight_v":
lowerCAmelCase = value
elif weight_type == "bias":
lowerCAmelCase = value
else:
lowerCAmelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights ( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        elif name.split('.' )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        words = [line.split(' ')[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
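# Illustration of create_vocab_dict on a toy fairseq dict file (made-up file
# content, not shipped with this script): each line is "<token> <count>", so a
# file containing "hello 10\nworld 7\n" yields
# {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'hello': 4, 'world': 5}.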
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers, ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove('embed_out')
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''')
    logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''')
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, 'vocab.json'), 'w') as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, 'vocab.json'))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
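# Example invocation (all paths below are placeholders, not real files):
#   python convert_checkpoint.py --checkpoint_path ./wav2vec2.pt \
#       --dict_path ./dict.ltr.txt --pytorch_dump_folder_path ./dumped \
#       --vocab_size 10224 --num_decoder_layers 7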
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=10_224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 4 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(r'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(r'''^\s*else:''')
def find_backend(line):
    """simple docstring"""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """simple docstring"""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + "\""):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + "\""):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + "\""):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """simple docstring"""
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f" {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f" {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """simple docstring"""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    '''convert_pytorch_checkpoint_to_tf2''',
    '''modeling_flax_pytorch_utils''',
]
def check_submodules():
    """simple docstring"""
    spec = importlib.util.spec_from_file_location(
        "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 340 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blip_2'''] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__( self , vertices , edges )-> None:
        '''simple docstring'''
        self.vertices = vertices
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge( self , edge , weight )-> None:
        '''simple docstring'''
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm( self )-> Graph:
        '''simple docstring'''
        subgraph = Graph({min(self.vertices )} , {} )
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
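# Minimal sanity check for the Graph/prims_algorithm pair above (a made-up
# triangle graph, not from the Project Euler input file): the minimum
# spanning tree keeps the two cheapest edges, so the saving equals the
# weight of the dropped edge.
_demo_graph = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
assert sum(_demo_graph.edges.values()) - sum(_demo_graph.prims_algorithm().edges.values()) == 3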
def solution(filename: str = "p107_network.txt" ) -> int:
    """simple docstring"""
    script_dir = os.path.abspath(os.path.dirname(__file__) )
    network_file = os.path.join(script_dir , filename )
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edgea: int
    edgeb: int
    with open(network_file ) as f:
        data = f.read().strip().split("\n" )
    adjaceny_matrix = [line.split("," ) for line in data]
    for edgea in range(1 , len(adjaceny_matrix ) ):
        for edgeb in range(edgea ):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjaceny_matrix[edgea][edgeb] )
    graph = Graph(set(range(len(adjaceny_matrix ) ) ) , edges )
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values() )
    optimal_total = sum(subgraph.edges.values() )
    return initial_total - optimal_total
if __name__ == "__main__":
    print(F"{solution() = }")
| 340 | 0 |
def counting_sort(collection) -> list:
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string) -> str:
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
A : List[Any] = input('Enter numbers separated by a comma:\n').strip()
A : Dict = [int(item) for item in user_input.split(',')]
print(counting_sort(unsorted)) | 6 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000 ) -> int:
    """simple docstring"""
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
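# Tiny sanity check (hand-traced, not from the original problem statement):
# with limit=12 the only reachable perimeter is 12 itself, via the primitive
# triple (3, 4, 5), so exactly one perimeter has a unique solution.
assert solution(12) == 1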
if __name__ == "__main__":
print(F"{solution() = }")
| 340 | 0 |
def solution(n: int = 4000000 ) -> int:
    '''simple docstring'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
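# Quick check against the classic small case (hand-computed, not from the
# source): the even Fibonacci terms not exceeding 100 are 0, 2, 8 and 34,
# which sum to 44.
assert solution(100) == 44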
if __name__ == "__main__":
print(f"""{solution() = }""")
| 7 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase__ ( ProcessorMixin ):
    attributes =["""image_processor""", """tokenizer"""]
    image_processor_class ="""LayoutLMv2ImageProcessor"""
    tokenizer_class =("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs )-> Tuple:
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , text_pair = None , boxes = None , word_labels = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , )-> BatchEncoding:
        '''simple docstring'''
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping )-> str:
        '''simple docstring'''
        # in case of overflow, map every `input_ids` sample back to its image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}" )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs )-> Union[str, Any]:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs )-> Dict:
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self )-> Optional[int]:
        '''simple docstring'''
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class( self )-> Union[str, Any]:
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self )-> str:
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
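# A minimal usage sketch for the processor above (illustrative only; the
# checkpoint name and image file are assumptions, not taken from this file):
#
#   from PIL import Image
#   processor = lowercase__.from_pretrained("microsoft/layoutxlm-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")
#   # encoding holds input_ids, bbox, attention_mask and image tensors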
| 340 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file , eval_file , test_file , tokenizer , label_column_id , max_seq_length = None , ):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('''csv''' , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    labelaid = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding='''max_length''' ) , batched=True , )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding='''max_length''' , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, labelaid
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    label_column_id: int = field(metadata={"help": "Which column contains the label"} )
    train_file: str = field(default=None , metadata={"help": "The path of the training file"} )
    dev_file: Optional[str] = field(default=None , metadata={"help": "The path of the development file"} )
    test_file: Optional[str] = field(default=None , metadata={"help": "The path of the test file"} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    use_fast: bool = field(default=False , metadata={"help": "Set this flag to use fast tokenization."} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            ''' --overwrite_output_dir to overcome.''' )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
    logger.info(
        F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
        F'''16-bits training: {training_args.fp16}''' )
    logger.info(F'''Training/evaluation parameters {training_args}''' )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id ) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        with open(output_eval_file , '''w''' ) as writer:
            logger.info('''***** Eval results *****''' )
            for key, value in result.items():
                logger.info(F''' {key} = {value}''' )
                writer.write(F'''{key} = {value}\n''' )
            results.update(result )
    return results
if __name__ == "__main__":
main() | 8 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowercase__ ( unittest.TestCase ):
    def setUp( self )-> Dict:
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs )-> Union[str, Any]:
        '''simple docstring'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **kwargs )
    def get_rust_tokenizer( self , **kwargs )-> Any:
        '''simple docstring'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **kwargs )
    def get_image_processor( self , **kwargs )-> Optional[Any]:
        '''simple docstring'''
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self )-> Any:
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self )-> int:
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self )-> Optional[int]:
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , OwlViTImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , OwlViTImageProcessor )
    def test_save_load_pretrained_additional_features( self )-> List[Any]:
        '''simple docstring'''
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , OwlViTImageProcessor )
    def test_image_processor( self )-> List[str]:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer( self )-> Dict:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str , return_tensors="np" )
        encoded_tok = tokenizer(input_str , return_tensors="np" )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
    def test_processor( self )-> int:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_processor_with_text_list( self )-> Any:
        '''simple docstring'''
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name )
        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text )
        seq_length = 16
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_processor_with_nested_text_list( self )-> Optional[int]:
        '''simple docstring'''
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name )
        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts )
        seq_length = 16
        batch_size = len(input_texts )
        num_max_text_queries = max([len(texts ) for texts in input_texts] )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_processor_case( self )-> str:
        '''simple docstring'''
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name )
        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts )
        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
        self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
        self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
    def test_processor_case2( self )-> List[str]:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input , query_images=query_input )
        self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self )-> Tuple:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
| 340 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowercase ( PipelineTool ):
    '''simple docstring'''
    description = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    default_checkpoint = '''CIDAS/clipseg-rd64-refined'''
    name = '''image_segmenter'''
    model_class = CLIPSegForImageSegmentation
    inputs = ['''image''', '''text''']
    outputs = ['''image''']
    def __init__( self , *args , **kwargs ) -> Tuple:
        requires_backends(self , ['''vision'''] )
        super().__init__(*args , **kwargs )
    def encode( self , image: "Image" , label: str ) -> Dict:
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='''pt''' )
    def forward( self , inputs ) -> Optional[int]:
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def decode( self , outputs ) -> Any:
        # binarize the raw logits, then scale to an 8-bit mask image
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
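# Hedged usage sketch (illustrative only; assumes the transformers agents
# runtime is available and an image file on disk — the agent normally
# instantiates and invokes tools itself):
#
#   tool = _lowercase()
#   mask = tool(image=Image.open("photo.jpg"), label="cat")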
| 9 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int , b: int , c: int ) -> tuple[complex, complex]:
    """simple docstring"""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero." )
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
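# Worked examples (hand-checked, not from the original file): x**2 - 3x + 2
# factors as (x - 1)(x - 2), while a negative discriminant yields complex
# conjugate roots.
assert quadratic_roots(1, -3, 2) == (2.0, 1.0)
assert quadratic_roots(1, 0, 1) == (1j, -1j)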
def main() -> None:
    """simple docstring"""
    solution1, solution2 = quadratic_roots(a=5 , b=6 , c=1 )
    print(F"The solutions are: {solution1} and {solution2}" )
if __name__ == "__main__":
    main()
| 340 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """simple docstring"""
    arr = [randint(-1000 , 1000 ) for i in range(10 )]
    r = randint(-5000 , 5000 )
    return (arr, r)
dataset = make_dataset()
def triplet_sum1(arr: list[int] , target: int ) -> tuple[int, ...]:
    """simple docstring"""
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def triplet_sum2(arr: list[int] , target: int ) -> tuple[int, int, int]:
    """simple docstring"""
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
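# Both implementations should agree on a simple hand-made case (ad hoc, not
# part of the random benchmark dataset): 5 + 7 + 23 == 35 and is the only
# qualifying triplet here.
assert triplet_sum1([13, 29, 7, 23, 5], 35) == (5, 7, 23)
assert triplet_sum2([13, 29, 7, 23, 5], 35) == (5, 7, 23)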
def solution_times() -> tuple[float, float]:
    """simple docstring"""
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=10000 )
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=10000 )
    return (min(times1 ), min(times2 ))
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    times = solution_times()
    print(f'The time for naive implementation is {times[0]}.')
    print(f'The time for optimized implementation is {times[1]}.')
| 10 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3 ) -> qiskit.result.counts.Counts:
    """simple docstring"""
    if isinstance(number_of_qubits , str ):
        raise TypeError("number of qubits must be a integer." )
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0." )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer." )
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10)." )
    qr = QuantumRegister(number_of_qubits , "qr" )
    cr = ClassicalRegister(number_of_qubits , "cr" )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(counter ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator" )
    job = execute(quantum_circuit , backend , shots=10_000 )
    return job.result().get_counts(quantum_circuit )
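# With the all-zero input state prepared here, the QFT produces a uniform
# superposition, so the 10_000 shots should split roughly evenly across all
# 2**n bitstrings (about 1250 each for n=3). This is a statistical property
# of the circuit, not an exact guarantee for any single run.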
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
| 340 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class lowerCAmelCase__ ( PretrainedConfig):
    '''simple docstring'''
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__( self , vocab_size=5_0_2_5_7 , max_position_embeddings=2_0_4_8 , hidden_size=2_0_4_8 , num_layers=2_4 , attention_types=[[["global", "local"], 1_2]] , num_heads=1_6 , intermediate_size=None , window_size=2_5_6 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.0_2 , use_cache=True , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , **kwargs , ) -> Union[str, Any]:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                F"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                F"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument.")
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types) -> Any:
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch
    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
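# Sketch of the equivalence this helper preserves (assuming PyTorch is
# installed; torch is only imported inside the function, so this is left
# commented rather than executed at import time):
#
#   import torch
#   x = torch.arange(10)
#   assert torch.equal(custom_unfold(x, 0, 4, 2), x.unfold(0, 4, 2))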
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    import torch
    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_attention_heads( self ) -> int:
        return self._config.num_heads
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast , self).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype)] , dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset( self ) -> int:
        return 1_3
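# --- Illustrative usage sketch (added; not part of the upstream module) ---
# A minimal check of the attention-type expansion above, via the installed
# `transformers` package where this module lives:
# from transformers import GPTNeoConfig
# assert GPTNeoConfig.expand_attention_types_params([[["global", "local"], 2]]) == [
#     "global", "local", "global", "local"
# ]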
| 11 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = """char"""
    BPE = """bpe"""
    WORDPIECE = """wp"""
SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    attributes = ["""image_processor""", """char_tokenizer"""]
    image_processor_class = """ViTImageProcessor"""
    char_tokenizer_class = """MgpstrTokenizer"""
    def __init__( self , image_processor=None , tokenizer=None , **kwargs )-> None:
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2" )
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process." )
        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self , sequences ):
        '''simple docstring'''
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0 )
        char_strs, char_scores = self._decode_helper(char_preds , "char" )
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds , "bpe" )
        wp_strs, wp_scores = self._decode_helper(wp_preds , "wp" )
        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper( self , pred_logits , format ):
        '''simple docstring'''
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(F"Format {format} is not supported." )
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        _, preds_index = pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
        preds_index = preds_index.view(-1 , batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )
        return dec_strs, conf_scores
    def char_decode( self , sequences ):
        '''simple docstring'''
        decode_strs = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs
    def bpe_decode( self , sequences ):
        '''simple docstring'''
        return self.bpe_tokenizer.batch_decode(sequences )
    def wp_decode( self , sequences ):
        '''simple docstring'''
        decode_strs = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs
| 340 | 0 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    '''simple docstring'''
    r = sorted(zip(vl , wt ) , key=lambda x : x[0] / x[1] , reverse=True )
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt ) )
    k = bisect(acc , w )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
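    # Hedged worked example (added; made-up numbers): value/weight ratios are
    # 6, 5 and 4, so capacity 50 takes the first two items whole plus 2/3 of
    # the third: 60 + 100 + (20/30) * 120 = 240.0.
    print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0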
| 12 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
    '''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_convbert_fast'''] = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_convbert'''] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_convbert'''] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path , map_location="cpu" )
    state_dict = chkpt["model"]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@" , "" ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
    print(f"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(f"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(config , indent=2 ) + "\n" )
    print(f"Save vocab file to {pytorch_vocab_dump_path}" )
    with open(pytorch_vocab_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(vocab , indent=2 ) + "\n" )
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase : int = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
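# Hedged CLI sketch (added; script name and paths are placeholders):
# python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#     --xlm_checkpoint_path ./mlm_en_2048.pth --pytorch_dump_folder_path ./xlm-dump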
| 13 |
from collections import defaultdict
def dfs(start: int) -> int:
    """simple docstring"""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree() -> None:
    """simple docstring"""
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
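# Hedged check (added): for the 10-node sample tree above, the subtrees rooted
# at 3 and 6 have even sizes (2 and 4) and the whole tree has size 10, so
# `cuts` ends up as [3, 6, 1] and the script prints 2.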
| 340 | 0 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            '''num_train_timesteps''': 1_000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas( self ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end)
    def test_schedules( self ):
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type( self ):
        '''simple docstring'''
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample( self ):
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding( self ):
        '''simple docstring'''
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_prediction_type( self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices( self ):
        '''simple docstring'''
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample , t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''')
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample , t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t)
    def test_custom_timesteps_increasing_order( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.'''):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps)
    def test_custom_timesteps_too_large( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
            scheduler.set_timesteps(timesteps=timesteps)
| 14 |
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    """simple docstring"""
    url = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text , "html.parser" )
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 0 |
class CircularQueue:
    '''simple docstring'''
    def __init__( self , n: int ):
        self.n = n
        self.array = [None] * self.n
        self.front = 0 # index of the first element
        self.rear = 0
        self.size = 0
    def __len__( self ) -> int:
        return self.size
    def is_empty( self ) -> bool:
        return self.size == 0
    def first( self ):
        return False if self.is_empty() else self.array[self.front]
    def enqueue( self , data ):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL" )
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue( self ):
        if self.size == 0:
            raise Exception("UNDERFLOW" )
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
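if __name__ == "__main__":
    # Illustrative demo (added): exercise the fixed-capacity ring buffer above.
    queue = CircularQueue(2)
    queue.enqueue(10).enqueue(20)
    assert len(queue) == 2 and queue.first() == 10
    assert queue.dequeue() == 10 and queue.dequeue() == 20
    assert queue.is_empty()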
| 15 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__( self , ints: Iterable[int] )-> None:
        '''simple docstring'''
        self.head: Node | None = None
        for i in sorted(ints , reverse=True ):
            self.head = Node(i , self.head )
    def __iter__( self )-> Iterator[int]:
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__( self )-> int:
        '''simple docstring'''
        return sum(1 for _ in self )
    def __str__( self )-> str:
        '''simple docstring'''
        return " -> ".join([str(node ) for node in self] )
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """simple docstring"""
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 340 | 0 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    '''simple docstring'''
    root_marker = ""
    protocol: str = (
        None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None # compression type in fsspec. ex: "gzip"
    extension: str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : Union[str, Any] ,_snake_case : str = "" ,_snake_case : Optional[str] = None ,_snake_case : Optional[dict] = None ,**_snake_case : int ) -> Any:
"""simple docstring"""
super().__init__(self ,**_snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowercase__ : Dict = fsspec.open(
_snake_case ,mode='''rb''' ,protocol=_snake_case ,compression=self.compression ,client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' ,{} ), # To avoid issues if it was already passed.
} ,**(target_options or {}) ,)
lowercase__ : Optional[Any] = os.path.basename(self.file.path.split('''::''' )[0] )
lowercase__ : List[Any] = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowercase__ : int = None
    @classmethod
    def _strip_protocol( cls , path ):
        """simple docstring"""
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path ).lstrip('''/''' )
    def _get_dirs( self ):
        """simple docstring"""
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
            self.dir_cache = {f['''name''']: f}
    def cat( self , path: str ):
        """simple docstring"""
        return self.file.open().read()
    def _open( self , path: str , mode: str = "rb" , block_size=None , autocommit=True , cache_options=None , **kwargs , ):
        """simple docstring"""
        path = self._strip_protocol(path )
        if mode != "rb":
            raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    '''simple docstring'''
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"
class GzipFileSystem(BaseCompressedFileFileSystem):
    '''simple docstring'''
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"
class Lz4FileSystem(BaseCompressedFileFileSystem):
    '''simple docstring'''
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"
class XzFileSystem(BaseCompressedFileFileSystem):
    '''simple docstring'''
    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class ZstdFileSystem(BaseCompressedFileFileSystem):
    '''simple docstring'''
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
def __init__( self : Optional[int] ,_snake_case : str ,_snake_case : str = "rb" ,_snake_case : Optional[str] = None ,_snake_case : Optional[dict] = None ,_snake_case : int = DEFAULT_BLOCK_SIZE ,**_snake_case : List[str] ,) -> List[str]:
"""simple docstring"""
super().__init__(
fo=_snake_case ,mode=_snake_case ,target_protocol=_snake_case ,target_options=_snake_case ,block_size=_snake_case ,**_snake_case ,)
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__
        class WrappedFile:
            '''simple docstring'''
            def __init__( self , file_ ):
                """simple docstring"""
                self._file = file_
def __enter__( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
self._file.__enter__()
return self
            def __exit__( self , *args , **kwargs ):
                """simple docstring"""
                self._file.__exit__(*args , **kwargs )
            def __iter__( self ):
                """simple docstring"""
                return iter(self._file )
            def __next__( self ):
                """simple docstring"""
                return next(self._file )
            def __getattr__( self , attr ):
                """simple docstring"""
                return getattr(self._file , attr )
        def fixed_enter(*args , **kwargs ):
            return WrappedFile(_enter(*args , **kwargs ) )
        self.file.__enter__ = fixed_enter
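# Hedged usage sketch (added): the `datasets` library registers these classes
# with fsspec on import, after which a compressed file can be read through a
# chained URL (paths are placeholders):
# import fsspec
# with fsspec.open("gzip://file.txt::./archive.txt.gz", "rt") as f:
#     text = f.read()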
| 16 |
import d4rl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
    env_name = '''hopper-medium-v2'''
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        '''bglick13/hopper-medium-v2-value-function-hor32''',
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
F" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 340 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["keras_nlp"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self, ["keras_nlp"] )
| 17 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def _find_text_in_file(filename, start_prompt, end_prompt):
    """simple docstring"""
    with open(filename , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    """simple docstring"""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """simple docstring"""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , "w" , encoding="utf-8" , newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 340 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
SPIECE_UNDERLINE = '''▁'''
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , sep_token="[SEP]" , mask_token="[MASK]" , cls_token="[CLS]" , **kwargs , ):
        """simple docstring"""
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
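# Hedged usage sketch (added; network access and the installed `transformers`
# package assumed):
# tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
# tok("BigBird uses block sparse attention.")["input_ids"]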
| 18 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """simple docstring"""
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + "Fast" )}
    logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
        for checkpoint in checkpoint_names:
            logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/" )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F"=> File names {file_names}" )
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    os.remove(file_name )
                    logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
a_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
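# Hedged CLI sketch (added; values are placeholders drawn from the
# SLOW_TO_FAST_CONVERTERS keys, script name assumed):
# python convert_slow_tokenizers_checkpoints_to_fast.py \
#     --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased --dump_path ./fast_tokenizers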
| 340 | 0 |
from math import factorial
def solution(n: int = 1_0_0) -> int:
    return sum(map(int , str(factorial(n) ) ) )
if __name__ == "__main__":
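    # Hedged worked check (added): 10! = 3628800 and 3+6+2+8+8+0+0 = 27.
    assert solution(10) == 27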
print(solution(int(input('''Enter the Number: ''').strip())))
| 19 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(tokenizer_name, data_dir, max_source_length=1_024, max_target_length=1_024, consider_target=False, **kwargs):
    """simple docstring"""
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="train" , **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["labels"].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="val" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
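# Hedged CLI sketch (added; fire exposes the function's positional arguments,
# paths are placeholders):
# python save_len_file.py t5-small ./data_dir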
| 340 | 0 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = """abc1abc12"""
    text1 = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
    text2 = """alskfjaldsk23adsfabcabc"""
    assert rabin_karp(pattern , text1 ) and not rabin_karp(pattern , text2 )
    # Test 2)
    pattern = """ABABX"""
    text = """ABABZABABYABABX"""
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = """AAAB"""
    text = """ABAAAAAB"""
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = """abcdabcy"""
    text = """abcxabcdabxabcdabcdabcy"""
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = """Lü"""
    text = """Lüsai"""
    assert rabin_karp(pattern , text )
    pattern = """Lue"""
    assert not rabin_karp(pattern , text )
    print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 20 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig(PretrainedConfig):
    model_type = """xlnet"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""", # Backward compatibility
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=32000 , d_model=1024 , n_layer=24 , n_head=16 , d_inner=4096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-1_2 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , )-> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead." , FutureWarning , )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self )-> int:
        '''simple docstring'''
        logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value )-> None:
        '''simple docstring'''
        raise NotImplementedError(
            F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 340 | 0 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"
IMPORT_IN_FUNCTION = "\ndef foo():\n import os\n return False\n"
DEEPLY_NESTED_IMPORT = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('case' , CASES )
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path , 'test_file.py' )
    with open(tmp_file_path , 'w' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
| 21 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        # (target key names follow the standard ViT conversion layout)
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head(state_dict):
    """simple docstring"""
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
    """simple docstring"""
    config = ViTMSNConfig()
    config.num_labels = 1_000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
lowerCAmelCase__ = ViTImageProcessor(
size=config.image_size , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ )
lowerCAmelCase__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
lowerCAmelCase__ = model(**UpperCamelCase_ )
lowerCAmelCase__ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
lowerCAmelCase__ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCamelCase_ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
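# A minimal usage sketch (hedged): this assumes the script above is saved as
# convert_vit_msn_to_pytorch.py (filename assumed) and that the default ViT-MSN
# small checkpoint URL is reachable:
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small
#
# The converted folder can then be reloaded with, e.g.:
#   from transformers import ViTMSNModel
#   model = ViTMSNModel.from_pretrained("./vit-msn-small")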
| 340 | 0 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
def __init__( self : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[int]=1_3 , snake_case_ : Dict=7 , snake_case_ : List[Any]=True , snake_case_ : Union[str, Any]=True , snake_case_ : Any=True , snake_case_ : str=True , snake_case_ : str=True , snake_case_ : int=False , snake_case_ : Union[str, Any]=False , snake_case_ : List[str]=False , snake_case_ : List[Any]=2 , snake_case_ : List[str]=9_9 , snake_case_ : str=0 , snake_case_ : List[str]=3_2 , snake_case_ : str=5 , snake_case_ : Optional[int]=4 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : List[str]=0.1 , snake_case_ : List[Any]=5_1_2 , snake_case_ : Dict=2 , snake_case_ : str=0.0_2 , snake_case_ : List[Any]=2 , snake_case_ : Tuple=4 , snake_case_ : Union[str, Any]="last" , snake_case_ : List[Any]=True , snake_case_ : List[Any]=None , snake_case_ : Optional[int]=0 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_lengths
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = gelu_activation
_UpperCAmelCase = sinusoidal_embeddings
_UpperCAmelCase = causal
_UpperCAmelCase = asm
_UpperCAmelCase = n_langs
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_special
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = summary_type
_UpperCAmelCase = use_proj
_UpperCAmelCase = scope
_UpperCAmelCase = bos_token_id
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config( self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowercase ( self : Any , snake_case_ : Dict , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : List[Any] , ):
_UpperCAmelCase = XLMModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , lengths=snake_case_ , langs=snake_case_ )
_UpperCAmelCase = model(snake_case_ , langs=snake_case_ )
_UpperCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Optional[Any] , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : Dict , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , ):
_UpperCAmelCase = XLMWithLMHeadModel(snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : int , snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : Optional[Any] , snake_case_ : str , snake_case_ : Any , snake_case_ : List[str] , snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[str] , ):
_UpperCAmelCase = XLMForQuestionAnsweringSimple(snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ )
_UpperCAmelCase = model(snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ )
_UpperCAmelCase = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self : Optional[Any] , snake_case_ : Dict , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : str , snake_case_ : List[str] , snake_case_ : Optional[int] , snake_case_ : List[Any] , ):
_UpperCAmelCase = XLMForQuestionAnswering(snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ )
_UpperCAmelCase = model(
snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , cls_index=snake_case_ , is_impossible=snake_case_ , p_mask=snake_case_ , )
_UpperCAmelCase = model(
snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , cls_index=snake_case_ , is_impossible=snake_case_ , )
((_UpperCAmelCase) , ) = result_with_labels.to_tuple()
_UpperCAmelCase = model(snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ )
((_UpperCAmelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase ( self : Any , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : str , ):
_UpperCAmelCase = XLMForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ )
_UpperCAmelCase = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase ( self : Tuple , snake_case_ : int , snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = XLMForTokenClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self : int , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : Optional[Any] , snake_case_ : List[Any] , snake_case_ : Optional[int] , ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = XLMForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = XLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=37 )
def lowercase ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowercase ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*snake_case_ )
def lowercase ( self : Dict ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*snake_case_ )
def lowercase ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*snake_case_ )
def lowercase ( self : int ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*snake_case_ )
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*snake_case_ )
def lowercase ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*snake_case_ )
def lowercase ( self : Tuple , snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Any=False , snake_case_ : Union[str, Any]=1 ):
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertListEqual(
[isinstance(snake_case_ , snake_case_ ) for iter_attentions in attentions] , [True] * len(snake_case_ ) )
self.assertEqual(len(snake_case_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(snake_case_ ):
# adds PAD dummy token
_UpperCAmelCase = min_length + idx + 1
_UpperCAmelCase = min_length + idx + 1
_UpperCAmelCase = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(snake_case_ ) )
def lowercase ( self : Any , snake_case_ : Any , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : int=False , snake_case_ : List[Any]=1 ):
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertListEqual(
[isinstance(snake_case_ , snake_case_ ) for iter_hidden_states in hidden_states] , [True] * len(snake_case_ ) , )
self.assertEqual(len(snake_case_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(snake_case_ ):
# adds PAD dummy token
_UpperCAmelCase = min_length + idx + 1
_UpperCAmelCase = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(snake_case_ ) , )
pass
@slow
def lowercase ( self : int ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = XLMModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
class A_ ( unittest.TestCase ):
@slow
def lowercase ( self : Any ):
_UpperCAmelCase = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
model.to(snake_case_ )
_UpperCAmelCase = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=snake_case_ ) # the president
_UpperCAmelCase = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_UpperCAmelCase = model.generate(snake_case_ , do_sample=snake_case_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , snake_case_ )
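# A hedged usage note: these tests are meant to be collected by pytest from a
# transformers repository checkout; the file path below is an assumption based on
# the repo's usual layout, and the @slow integration test only runs with RUN_SLOW=1:
#
#   RUN_SLOW=1 python -m pytest tests/models/xlm/test_modeling_xlm.py -v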
| 22 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline(Pipeline ):
def __init__( self , **__UpperCAmelCase )-> List[str]:
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , images , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ):
        '''simple docstring'''
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
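# A minimal usage sketch (hedged): the checkpoint name below is an assumption; any
# CLIP-style zero-shot image classification checkpoint should work:
#
#   from transformers import pipeline
#
#   classifier = pipeline(
#       "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
#   )
#   preds = classifier(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["two cats", "a dog", "a plane"],
#   )
#   # -> list of {"score": ..., "label": ...} dicts sorted by descending score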
| 340 | 0 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(num: int ) -> bool:
    candidate = str(num )
    return len(candidate ) == 9 and set(candidate ) == set('''123456789''' )
def solution() -> int | None:
    for base_num in range(9999 , 4999 , -1 ):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(333 , 99 , -1 ):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
if __name__ == "__main__":
print(F"{solution() = }")
| 23 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase ):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
        with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
            for token in vocab_tokens:
                fp.write(F"{token} {vocab_tokens[token]}\n" )
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
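# A hedged usage note: judging from the expected ids in the test above, the tiny
# fixture vocabulary reserves low ids for special tokens (unk is id 3) and starts
# regular pieces at id 4, which is why the out-of-vocabulary pieces "l" and "à"
# both map to 3. To run just this file from a transformers checkout (path assumed):
#   python -m pytest tests/models/bartpho/test_tokenization_bartpho.py -v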
| 340 | 0 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class DepthEstimationPipeline(Pipeline ):
    def __init__(self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        requires_backends(self , '''vision''' )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )
    def __call__(self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        """simple docstring"""
        return super().__call__(images , **kwargs )
    def _sanitize_parameters(self , **kwargs ):
        """simple docstring"""
        return {}, {}, {}
    def preprocess(self , image ):
        """simple docstring"""
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward(self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess(self , model_outputs ):
        """simple docstring"""
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype('''uint8''' )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
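# A minimal usage sketch (hedged): the checkpoint below is an assumption; any
# depth-estimation checkpoint (e.g. a DPT or GLPN model) should work:
#
#   from transformers import pipeline
#
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")        # PIL image built in postprocess above
#   print(result["predicted_depth"].shape)   # raw model output tensor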
| 24 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None )-> List[int]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None )-> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None )-> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
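# A minimal usage sketch (hedged) of the special-token helpers defined above:
#
#   from transformers import DistilBertTokenizerFast
#
#   tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   a = tok.convert_tokens_to_ids(["hello"])
#   b = tok.convert_tokens_to_ids(["world"])
#   print(tok.build_inputs_with_special_tokens(a, b))
#   # -> [CLS] hello [SEP] world [SEP]
#   print(tok.create_token_type_ids_from_sequences(a, b))
#   # -> [0, 0, 0, 1, 1]  (cls + first sequence + sep are 0, second sequence + sep are 1)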
| 340 | 0 |
"""simple docstring"""
from math import factorial
class Dual:
    """simple docstring"""
    def __init__(self , real , rank ):
        """simple docstring"""
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__(self ):
        """simple docstring"""
        return (
            F'''{self.real}+'''
            F'''{'+'.join(str(dual )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}'''
        )
    def reduce(self ):
        """simple docstring"""
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return Dual(self.real , cur )
    def __add__(self , other ):
        """simple docstring"""
        if not isinstance(other , Dual ):
            return Dual(self.real + other , self.duals )
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual ) > len(o_dual ):
            o_dual.extend([1] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([1] * (len(o_dual ) - len(s_dual )) )
        new_duals = []
        for i in range(len(s_dual ) ):
            new_duals.append(s_dual[i] + o_dual[i] )
        return Dual(self.real + other.real , new_duals )
    __radd__ = __add__
    def __sub__(self , other ):
        """simple docstring"""
        return self + other * -1
    def __mul__(self , other ):
        """simple docstring"""
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other )
            return Dual(self.real * other , new_duals )
        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , new_duals )
    __rmul__ = __mul__
    def __truediv__(self , other ):
        """simple docstring"""
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other )
            return Dual(self.real / other , new_duals )
        raise ValueError
    def __floordiv__(self , other ):
        """simple docstring"""
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other )
            return Dual(self.real // other , new_duals )
        raise ValueError
    def __pow__(self , n ):
        """simple docstring"""
        if n < 0 or isinstance(n , float ):
            raise ValueError("""power must be a positive integer""" )
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1 ):
            x *= self
        return x
def differentiate(func , position , order ):
    if not callable(func ):
        raise ValueError("""differentiate() requires a function as input for func""" )
    if not isinstance(position , (float, int) ):
        raise ValueError("""differentiate() requires a float as input for position""" )
    if not isinstance(order , int ):
        raise ValueError("""differentiate() requires an int as input for order""" )
    d = Dual(position , 1 )
    result = func(d )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y ):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
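    # Worked check of the demo above: f(y) = y**2 * y**4 = y**6, so the second
    # derivative is f''(y) = 30 * y**4, and at y = 9 that is 30 * 6561 = 196830,
    # which is the value differentiate(f, 9, 2) prints.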
| 25 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , text_pair = None , boxes = None , word_labels = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["""words"""]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["""words"""] , text_pair=text_pair , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        pixel_values = features.pop("""pixel_values""" )
        if return_overflowing_tokens is True:
            pixel_values = self.get_overflowing_images(pixel_values , encoded_inputs["""overflow_to_sample_mapping"""] )
        encoded_inputs["""pixel_values"""] = pixel_values
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
                F''' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}''' )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
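# A minimal usage sketch (hedged): the checkpoint name is an assumption, and the
# OCR behaviour depends on how the image processor was initialized:
#
#   from transformers import LayoutLMv3Processor
#   from PIL import Image
#
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
#   print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values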
| 26 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''

# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(r'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(r'''^\s*else:''')
def find_backend(line ):
    """simple docstring"""
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init( UpperCamelCase_ ):
"""simple docstring"""
with open(UpperCamelCase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = 0
while line_index < len(UpperCamelCase_ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(UpperCamelCase_ ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase__ = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(UpperCamelCase_ ):
lowerCAmelCase__ = _re_one_line_import_struct.search(UpperCamelCase_ ).groups()[0]
lowerCAmelCase__ = re.findall("\[([^\]]+)\]" , UpperCamelCase_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
lowerCAmelCase__ = _re_import_struct_key_value.search(UpperCamelCase_ )
if single_line_import_search is not None:
lowerCAmelCase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(UpperCamelCase_ ) > 0]
objects.extend(UpperCamelCase_ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase__ = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
lowerCAmelCase__ = lines[line_index]
if _re_import_struct_add_one.search(UpperCamelCase_ ) is not None:
objects.append(_re_import_struct_add_one.search(UpperCamelCase_ ).groups()[0] )
elif _re_import_struct_add_many.search(UpperCamelCase_ ) is not None:
lowerCAmelCase__ = _re_import_struct_add_many.search(UpperCamelCase_ ).groups()[0].split(", " )
                lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(obj ) > 0]
objects.extend(UpperCamelCase_ )
elif _re_between_brackets.search(UpperCamelCase_ ) is not None:
lowerCAmelCase__ = _re_between_brackets.search(UpperCamelCase_ ).groups()[0].split(", " )
                lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(obj ) > 0]
objects.extend(UpperCamelCase_ )
elif _re_quote_object.search(UpperCamelCase_ ) is not None:
objects.append(_re_quote_object.search(UpperCamelCase_ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase__ = []
while (
line_index < len(UpperCamelCase_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
lowerCAmelCase__ = lines[line_index]
lowerCAmelCase__ = _re_import.search(UpperCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase__ = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(UpperCamelCase_ ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
lowerCAmelCase__ = lines[line_index]
lowerCAmelCase__ = _re_import.search(UpperCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects , type_hint_objects ):
"""simple docstring"""
def find_duplicates(UpperCamelCase_ : str ):
return [k for k, v in collections.Counter(UpperCamelCase_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase__ = []
for key in import_dict_objects.keys():
lowerCAmelCase__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
lowerCAmelCase__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCAmelCase__ = "base imports" if key == "none" else F"{key} backend"
errors.append(F"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F" {a} in _import_structure but not in TYPE_HINT." )
return errors
def check_all_inits():
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , "__init__.py" )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors ) )
    if len(failures ) > 0:
        raise ValueError("\n\n".join(failures ) )
def get_transformers_submodules():
    """simple docstring"""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_" ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("*.py" ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , "." )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
            if len(submodule.split("." ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
    '''convert_pytorch_checkpoint_to_tf2''',
    '''modeling_flax_pytorch_utils''',
]
def check_submodules():
    """simple docstring"""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers" , os.path.join(PATH_TO_TRANSFORMERS , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = "\n".join(F"- {module}" for module in module_not_registered )
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            F"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
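# A hedged usage note: in the transformers repository this script lives at
# utils/check_inits.py (path assumed from PATH_TO_TRANSFORMERS above) and is run
# from the repo root, typically as part of `make repo-consistency`:
#
#   python utils/check_inits.py
#
# It exits silently when every __init__.py's _import_structure matches its
# TYPE_CHECKING imports, and raises ValueError listing the mismatches otherwise.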
| 340 | 0 |
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=False , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=64 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , __a=2 , __a=2 , __a=2 , __a=2 , __a=4 , __a=1 , ):
'''simple docstring'''
__a : Union[str, Any] = parent
__a : Union[str, Any] = batch_size
__a : str = seq_length
__a : str = is_training
__a : str = use_input_mask
__a : Dict = use_token_type_ids
__a : List[str] = use_labels
__a : Union[str, Any] = vocab_size
__a : int = hidden_size
__a : Any = num_hidden_layers
__a : Dict = num_attention_heads
__a : str = intermediate_size
__a : Optional[int] = hidden_act
__a : int = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : List[str] = max_position_embeddings
__a : List[str] = type_vocab_size
__a : Tuple = type_sequence_label_size
__a : Optional[Any] = initializer_range
__a : Tuple = num_labels
__a : Tuple = num_choices
__a : Dict = scope
__a : int = q_groups
__a : Optional[Any] = k_groups
__a : List[str] = v_groups
__a : Union[str, Any] = post_attention_groups
__a : List[Any] = intermediate_groups
__a : Dict = output_groups
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : str = None
if self.use_input_mask:
__a : int = random_attention_mask([self.batch_size, self.seq_length] )
__a : Tuple = None
__a : List[str] = None
__a : Tuple = None
if self.use_labels:
__a : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
__a : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[str] = SqueezeBertModel(config=__a )
model.to(__a )
model.eval()
__a : List[Any] = model(__a , __a )
__a : List[str] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Tuple = SqueezeBertForMaskedLM(config=__a )
model.to(__a )
model.eval()
__a : Dict = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = SqueezeBertForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
__a : int = model(
__a , attention_mask=__a , start_positions=__a , end_positions=__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[Any] = self.num_labels
__a : int = SqueezeBertForSequenceClassification(__a )
model.to(__a )
model.eval()
__a : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[Any] = self.num_labels
__a : Dict = SqueezeBertForTokenClassification(config=__a )
model.to(__a )
model.eval()
__a : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : str = self.num_choices
__a : int = SqueezeBertForMultipleChoice(config=__a )
model.to(__a )
model.eval()
__a : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Dict = model(
__a , attention_mask=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.prepare_config_and_inputs()
((__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Any = config_and_inputs
__a : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
A_ = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = False
A_ = True
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = SqueezeBertModelTester(self )
__a : Optional[int] = ConfigTester(self , config_class=__a , dim=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : int = SqueezeBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_sentencepiece
@require_tokenizers
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
__a : Optional[int] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
__a : List[Any] = model(__a )[0]
__a : int = torch.Size((1, 3) )
self.assertEqual(output.shape , __a )
__a : str = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(__a , __a , atol=1E-4 ) )
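        # Note (added for clarity): "squeezebert/squeezebert-mnli" is a three-way
        # NLI classifier, which is why the integration test above expects logits
        # of shape (1, 3).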
| 27 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__( self , vertices , edges )-> None:
        '''simple docstring'''
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge( self , edge , weight )-> None:
        '''simple docstring'''
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight
    def prims_algorithm( self )-> Graph:
        '''simple docstring'''
        # Grow a spanning tree from the smallest vertex, always adding the
        # cheapest edge that crosses from the tree to an unvisited vertex.
        subgraph = Graph({min(self.vertices )} , {} )
        min_edge: EdgeT
        min_weight: int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
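# Minimal usage sketch (illustrative; the toy triangle below is not part of
# the puzzle input): Prim's algorithm keeps the two cheapest edges, so the
# minimum spanning tree of this weight-1/2/3 triangle weighs 1 + 2 = 3.
def _example_prims() -> None:
    toy_graph = Graph({0, 1, 2} , {(0, 1): 1, (1, 2): 2, (0, 2): 3} )
    assert sum(toy_graph.prims_algorithm().edges.values() ) == 3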
def solution(filename : str = "p107_network.txt" ) -> int:
    """simple docstring"""
    script_directory = os.path.abspath(os.path.dirname(__file__ ) )
    network_file = os.path.join(script_directory , filename )
    edges: dict[EdgeT, int] = {}
    with open(network_file ) as f:
        data = f.read().strip().split("\n" )
    adjacency_matrix = [line.split("," ) for line in data]
    # The matrix is symmetric, so reading the lower triangle covers every edge once.
    for edge_a in range(1 , len(adjacency_matrix ) ):
        for edge_b in range(edge_a ):
            if adjacency_matrix[edge_a][edge_b] != "-":
                edges[(edge_b, edge_a)] = int(adjacency_matrix[edge_a][edge_b] )
    graph = Graph(set(range(len(adjacency_matrix ) ) ) , edges )
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values() )
    optimal_total = sum(subgraph.edges.values() )
    return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
| 340 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
@staticmethod
@abstractmethod
def A ( UpperCamelCase__ : ArgumentParser ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def A ( self : List[str] ):
"""simple docstring"""
raise NotImplementedError()
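# A minimal subclass sketch (hypothetical "hello" command, for illustration
# only; upstream this ABC is transformers' BaseTransformersCLICommand):
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             hello_parser = parser.add_parser("hello")
#             hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello")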
| 28 |
from collections import defaultdict
from math import gcd
def solution(limit : int = 1_500_000 ) -> int:
    """simple docstring"""
    # Count, for every perimeter up to the limit, how many right triangles with
    # integral sides produce it; Euclid's formula generates each primitive
    # triple exactly once from coprime (m, n) of opposite parity.
    frequencies: defaultdict = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"{solution() = }")
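# Worked example (illustrative): Euclid's formula gives a = m**2 - n**2,
# b = 2*m*n, c = m**2 + n**2, so the perimeter is a + b + c = 2*m*(m + n).
# For euclid_m = 2, euclid_n = 1 that is 12 -- the (3, 4, 5) triangle -- and
# the inner loop also tallies its multiples 24, 36, ... up to the limit.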
| 340 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
__UpperCAmelCase = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
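# Illustrative note: with _LazyModule, the heavy torch/sentencepiece modules
# listed above are only imported the first time one of their names is accessed,
# so importing just the configuration class does not pull in the modeling code.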
| 29 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase__ ( _UpperCAmelCase ):
a_ =["""image_processor""", """tokenizer"""]
a_ ="""LayoutLMv2ImageProcessor"""
a_ =("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> Tuple:
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , )-> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
lowerCAmelCase__ = self.image_processor(images=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCAmelCase__ = features["words"]
lowerCAmelCase__ = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
# add pixel values
lowerCAmelCase__ = features.pop("pixel_values" )
if return_overflowing_tokens is True:
lowerCAmelCase__ = self.get_overflowing_images(__UpperCAmelCase , encoded_inputs["overflow_to_sample_mapping"] )
lowerCAmelCase__ = images
return encoded_inputs
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F" {len(__UpperCAmelCase )} and {len(__UpperCAmelCase )}" )
return images_with_overflow
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Dict:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , )
return self.image_processor
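# Usage sketch (illustrative; upstream this class is LayoutXLMProcessor, the
# checkpoint name and `document_image` are placeholders, and the default
# apply_ocr=True path additionally requires pytesseract):
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(document_image, return_tensors="pt")
#     # -> input_ids, bbox, attention_mask and image, per model_input_names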
| 340 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__a = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def a ( snake_case__: str , snake_case__: Tuple ):
'''simple docstring'''
inspect_dataset(snake_case__ , snake_case__ )
lowercase_ = path + '''.py'''
assert script_name in os.listdir(snake_case__ )
assert "__pycache__" not in os.listdir(snake_case__ )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def a ( snake_case__: List[str] , snake_case__: Dict ):
'''simple docstring'''
inspect_metric(snake_case__ , snake_case__ )
lowercase_ = path + '''.py'''
assert script_name in os.listdir(snake_case__ )
assert "__pycache__" not in os.listdir(snake_case__ )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def a ( snake_case__: Dict , snake_case__: Optional[int] , snake_case__: Dict ):
'''simple docstring'''
lowercase_ = get_dataset_config_info(snake_case__ , config_name=snake_case__ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def a ( snake_case__: Any , snake_case__: Optional[int] , snake_case__: List[Any] ):
'''simple docstring'''
with pytest.raises(snake_case__ ):
get_dataset_config_info(snake_case__ , config_name=snake_case__ )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def a ( snake_case__: int , snake_case__: Optional[int] ):
'''simple docstring'''
lowercase_ = get_dataset_config_names(snake_case__ )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def a ( snake_case__: List[Any] , snake_case__: Optional[int] , snake_case__: Any ):
'''simple docstring'''
lowercase_ = get_dataset_infos(snake_case__ )
assert list(infos.keys() ) == expected_configs
lowercase_ = expected_configs[0]
assert expected_config in infos
lowercase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def a ( snake_case__: str , snake_case__: int , snake_case__: Tuple ):
'''simple docstring'''
lowercase_ = get_dataset_infos(snake_case__ )
assert expected_config in infos
lowercase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def a ( snake_case__: List[str] , snake_case__: str , snake_case__: Optional[Any] ):
'''simple docstring'''
with pytest.raises(snake_case__ ):
get_dataset_split_names(snake_case__ , config_name=snake_case__ )
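# Usage sketch (illustrative, mirroring the parametrized cases above):
#
#     from datasets import get_dataset_split_names
#     assert get_dataset_split_names("squad", config_name="plain_text") == ["train", "validation"]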
| 30 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase__ = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCAmelCase__ = {"unk_token": "<unk>"}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__UpperCAmelCase ) )
lowerCAmelCase__ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCAmelCase__ = os.path.join(self.tmpdirname , __UpperCAmelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Any:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase__ = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase )
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCAmelCase__ = self.get_image_processor(do_normalize=__UpperCAmelCase )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = image_processor(__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = processor(images=__UpperCAmelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = processor(text=__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = tokenizer(__UpperCAmelCase , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = [["cat", "nasa badge"], ["person"]]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = len(__UpperCAmelCase )
lowerCAmelCase__ = max([len(__UpperCAmelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = inputs["input_ids"]
lowerCAmelCase__ = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(images=__UpperCAmelCase , query_images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ = processor.batch_decode(__UpperCAmelCase )
lowerCAmelCase__ = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
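        # Usage sketch (illustrative; upstream this processor is OwlViTProcessor
        # and `image` is a placeholder PIL image): text queries and images are
        # batched together for zero-shot object detection:
        #
        #     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")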
| 340 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor (as nested Python lists)."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , A : str , A : Dict=7 , A : List[Any]=400 , A : Union[str, Any]=2000 , A : str=24 , A : Optional[Any]=24 , A : Optional[Any]=0.0 , A : Optional[int]=16000 , A : str=True , A : Optional[Any]=True , ):
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : int = batch_size
_UpperCAmelCase : Tuple = min_seq_length
_UpperCAmelCase : List[str] = max_seq_length
_UpperCAmelCase : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase : Dict = feature_size
_UpperCAmelCase : List[Any] = num_mel_bins
_UpperCAmelCase : Union[str, Any] = padding_value
_UpperCAmelCase : Optional[Any] = sampling_rate
_UpperCAmelCase : Optional[int] = return_attention_mask
_UpperCAmelCase : Tuple = do_normalize
def _A ( self : Union[str, Any] ):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _A ( self : str , A : Tuple=False , A : Any=False ):
def _flatten(A : Optional[Any] ):
return list(itertools.chain(*A ) )
if equal_length:
_UpperCAmelCase : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_UpperCAmelCase : Dict = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCAmelCase : Optional[int] = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    '''simple docstring'''
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None
    def setUp( self ):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self )
def _A ( self : Optional[Any] , A : Any ):
self.assertTrue(np.all(np.mean(A , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A , axis=0 ) - 1 ) < 1E-3 ) )
def _A ( self : Any ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCAmelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase : int = [np.asarray(A ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase : Tuple = feature_extractor(A , padding=A , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_UpperCAmelCase : str = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(A , A , atol=1E-3 ) )
# Test batched
_UpperCAmelCase : List[str] = feature_extractor(A , return_tensors="np" ).input_features
_UpperCAmelCase : List[str] = feature_extractor(A , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase : Tuple = np.asarray(A )
_UpperCAmelCase : Union[str, Any] = feature_extractor(A , return_tensors="np" ).input_features
_UpperCAmelCase : Any = feature_extractor(A , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1E-3 ) )
def _A ( self : List[Any] ):
_UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase : Tuple = ["longest", "max_length", "do_not_pad"]
_UpperCAmelCase : int = [None, 16, None]
for max_length, padding in zip(A , A ):
_UpperCAmelCase : str = feature_extractor(
A , padding=A , max_length=A , return_attention_mask=A )
_UpperCAmelCase : int = inputs.input_features
_UpperCAmelCase : Any = inputs.attention_mask
_UpperCAmelCase : Dict = [np.sum(A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase : Optional[int] = ["longest", "max_length", "do_not_pad"]
_UpperCAmelCase : Optional[int] = [None, 16, None]
for max_length, padding in zip(A , A ):
_UpperCAmelCase : List[Any] = feature_extractor(
A , max_length=A , padding=A , return_tensors="np" , return_attention_mask=A )
_UpperCAmelCase : Dict = inputs.input_features
_UpperCAmelCase : Dict = inputs.attention_mask
_UpperCAmelCase : str = [np.sum(A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _A ( self : Dict ):
_UpperCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase : Optional[int] = feature_extractor(
A , padding="max_length" , max_length=4 , truncation=A , return_tensors="np" , return_attention_mask=A , )
_UpperCAmelCase : List[Any] = inputs.input_features
_UpperCAmelCase : List[Any] = inputs.attention_mask
_UpperCAmelCase : List[str] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def _A ( self : Optional[int] ):
_UpperCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase : Optional[int] = feature_extractor(
A , padding="longest" , max_length=4 , truncation=A , return_tensors="np" , return_attention_mask=A , )
_UpperCAmelCase : Dict = inputs.input_features
_UpperCAmelCase : Tuple = inputs.attention_mask
_UpperCAmelCase : List[Any] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
_UpperCAmelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase : Dict = feature_extractor(
A , padding="longest" , max_length=16 , truncation=A , return_tensors="np" , return_attention_mask=A , )
_UpperCAmelCase : int = inputs.input_features
_UpperCAmelCase : Tuple = inputs.attention_mask
_UpperCAmelCase : Any = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def _A ( self : Optional[int] ):
import torch
_UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : str = np.random.rand(100 , 32 ).astype(np.floataa )
_UpperCAmelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase : Tuple = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_UpperCAmelCase : List[str] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def _load_datasamples( self , num_samples ):
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration( self ):
        # fmt: off
        expected = np.array([
            -1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
            -1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
            -1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
        ] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        input_features = feature_extractor(input_speech , return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape , (1, 584, 24) )
        self.assertTrue(np.allclose(input_features[0, 0, :30] , expected , atol=1E-4 ) )
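        # Usage sketch (illustrative; `raw_audio` is a placeholder 1-D float
        # waveform sampled at 16 kHz): the extractor returns log-mel
        # `input_features` of shape (batch, frames, num_mel_bins):
        #
        #     inputs = feature_extractor(raw_audio, sampling_rate=16000, return_tensors="pt")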
| 31 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a : int , b : int , c : int ) -> tuple[complex, complex]:
    """simple docstring"""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero." )
    delta = b * b - 4 * a * c
    root_one = (-b + sqrt(delta )) / (2 * a)
    root_two = (-b - sqrt(delta )) / (2 * a)
    # Return plain floats when the roots are real, complex numbers otherwise.
    return (
        root_one.real if not root_one.imag else root_one,
        root_two.real if not root_two.imag else root_two,
    )
def main() -> None:
    """simple docstring"""
    solution_one , solution_two = quadratic_roots(a=5 , b=6 , c=1 )
    print(F"The solutions are: {solution_one} and {solution_two}" )
if __name__ == "__main__":
main()
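# Worked check (illustrative): for 5x^2 + 6x + 1 the discriminant is
# 6**2 - 4*5*1 = 16, so the roots are (-6 +/- 4) / 10, i.e. -0.2 and -1.0.
# With c = 2 instead, the discriminant is -4 and the roots come back complex.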
| 340 | 0 |
def xnor_gate(input_a : int , input_b : int ) -> int:
    """simple docstring"""
    # XNOR is true exactly when both inputs agree.
    return 1 if input_a == input_b else 0
def test_xnor_gate() -> None:
    """simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 32 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits : int = 3 ) -> qiskit.result.counts.Counts:
    """simple docstring"""
    if isinstance(number_of_qubits , str ):
        raise TypeError("number of qubits must be an integer." )
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0." )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer." )
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10)." )
    qr = QuantumRegister(number_of_qubits , "qr" )
    cr = ClassicalRegister(number_of_qubits , "cr" )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    # Hadamard each qubit (highest index first), then apply the controlled
    # phase rotations that build up the Fourier transform.
    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    # Reverse the qubit order with swaps.
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator" )
    job = execute(quantum_circuit , backend , shots=10_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
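# Illustrative note: the QFT of the all-zeros register is an equal
# superposition, so for 3 qubits the 10_000 shots land roughly uniformly
# (~1_250 each) on the 8 basis states '000' ... '111'.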
| 340 | 0 |
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _UpperCAmelCase ( _A , _A ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = "pixel_values"
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TimmBackboneConfig
def __init__( self : str , A : Any , **A : Union[str, Any] ) -> List[Any]:
requires_backends(self , '''timm''' )
super().__init__(A )
lowercase_ : str = config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
if config.backbone not in timm.list_models():
raise ValueError(F'''backbone {config.backbone} is not supported by timm.''' )
if hasattr(A , '''out_features''' ) and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
lowercase_ : Optional[Any] = getattr(A , '''use_pretrained_backbone''' , A )
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
# We just take the final layer by default. This matches the default for the transformers models.
lowercase_ : Union[str, Any] = config.out_indices if getattr(A , '''out_indices''' , A ) is not None else (-1,)
lowercase_ : Dict = timm.create_model(
config.backbone , pretrained=A , features_only=config.features_only , in_chans=config.num_channels , out_indices=A , **A , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowercase_ : Dict = self._backbone.return_layers
lowercase_ : Union[str, Any] = {layer['''module''']: str(A ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(A )
@classmethod
def A ( cls : Dict , A : Union[str, Any] , *A : Tuple , **A : str ) -> Optional[Any]:
requires_backends(cls , ['''vision''', '''timm'''] )
from ...models.timm_backbone import TimmBackboneConfig
lowercase_ : Optional[Any] = kwargs.pop('''config''' , TimmBackboneConfig() )
lowercase_ : Tuple = kwargs.pop('''use_timm_backbone''' , A )
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''' )
lowercase_ : Any = kwargs.pop('''num_channels''' , config.num_channels )
lowercase_ : Any = kwargs.pop('''features_only''' , config.features_only )
lowercase_ : Any = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone )
lowercase_ : List[str] = kwargs.pop('''out_indices''' , config.out_indices )
lowercase_ : List[Any] = TimmBackboneConfig(
backbone=A , num_channels=A , features_only=A , use_pretrained_backbone=A , out_indices=A , )
return super()._from_config(A , **A )
def A ( self : Tuple , A : int ) -> int:
pass
def A ( self : Dict , A : Union[str, Any] , A : int=None , A : Optional[int]=None , A : Union[str, Any]=None , **A : Union[str, Any] ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
lowercase_ : str = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ : List[Any] = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowercase_ : Any = self._all_layers
lowercase_ : Optional[int] = self._backbone(A , **A )
lowercase_ : Optional[Any] = self._return_layers
lowercase_ : Dict = tuple(hidden_states[i] for i in self.out_indices )
else:
lowercase_ : int = self._backbone(A , **A )
lowercase_ : str = None
lowercase_ : Tuple = tuple(A )
lowercase_ : List[str] = tuple(A ) if hidden_states is not None else None
if not return_dict:
lowercase_ : Optional[int] = (feature_maps,)
if output_hidden_states:
lowercase_ : List[Any] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=A , hidden_states=A , attentions=A )
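        # Usage sketch (illustrative; "resnet18" is an example timm model name
        # and `pixel_values` a placeholder batch of images):
        #
        #     config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
        #     backbone = TimmBackbone(config)
        #     feature_maps = backbone(pixel_values).feature_maps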
| 33 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowercase__ ( _UpperCAmelCase ):
a_ ="""char"""
a_ ="""bpe"""
a_ ="""wp"""
a_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowercase__ ( _UpperCAmelCase ):
a_ =["""image_processor""", """char_tokenizer"""]
a_ ="""ViTImageProcessor"""
a_ ="""MgpstrTokenizer"""
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
lowerCAmelCase__ = tokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained("gpt2" )
lowerCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> List[Any]:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
lowerCAmelCase__ = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None:
lowerCAmelCase__ = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase__ = encodings["input_ids"]
return inputs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = sequences
lowerCAmelCase__ = char_preds.size(0 )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "char" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "bpe" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "wp" )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCAmelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCAmelCase__ = scores.index(max(__UpperCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCAmelCase__ = {}
lowerCAmelCase__ = final_strs
lowerCAmelCase__ = final_scores
lowerCAmelCase__ = char_strs
lowerCAmelCase__ = bpe_strs
lowerCAmelCase__ = wp_strs
return out
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowerCAmelCase__ = self.char_decode
lowerCAmelCase__ = 1
lowerCAmelCase__ = "[s]"
elif format == DecodeType.BPE:
lowerCAmelCase__ = self.bpe_decode
lowerCAmelCase__ = 2
lowerCAmelCase__ = "#"
elif format == DecodeType.WORDPIECE:
lowerCAmelCase__ = self.wp_decode
lowerCAmelCase__ = 102
lowerCAmelCase__ = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
lowerCAmelCase__ , lowerCAmelCase__ = [], []
lowerCAmelCase__ = pred_logits.size(0 )
lowerCAmelCase__ = pred_logits.size(1 )
lowerCAmelCase__ , lowerCAmelCase__ = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase )
lowerCAmelCase__ = preds_index.view(-1 , __UpperCAmelCase )[:, 1:]
lowerCAmelCase__ = decoder(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ = torch.nn.functional.softmax(__UpperCAmelCase , dim=2 ).max(dim=2 )
lowerCAmelCase__ = preds_max_prob[:, 1:]
for index in range(__UpperCAmelCase ):
lowerCAmelCase__ = preds_str[index].find(__UpperCAmelCase )
lowerCAmelCase__ = preds_str[index][:pred_eos]
lowerCAmelCase__ = preds_index[index].cpu().tolist()
lowerCAmelCase__ = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1
lowerCAmelCase__ = preds_max_prob[index][: pred_eos_index + 1]
lowerCAmelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__UpperCAmelCase )
conf_scores.append(__UpperCAmelCase )
return dec_strs, conf_scores
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
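        # Illustrative note: batch_decode above runs all three heads (character,
        # BPE, WordPiece) and, per sample, keeps the string whose cumulative
        # softmax confidence is highest; the per-head strings are also returned
        # for inspection.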
| 340 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A =logging.getLogger(__name__)
def simple_accuracy(preds , labels ):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , _a )
# Set seed
set_seed(training_args.seed )
try:
UpperCAmelCase = processors[data_args.task_name]()
UpperCAmelCase = processor.get_labels()
UpperCAmelCase = len(_a )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_a , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , )
# Get datasets
UpperCAmelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
UpperCAmelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
UpperCAmelCase = DataCollatorWithPadding(_a , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
UpperCAmelCase = Trainer(
model=_a , args=_a , train_dataset=_a , eval_dataset=_a , compute_metrics=_a , data_collator=_a , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase = trainer.evaluate()
UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(_a , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , _a , _a )
writer.write('''%s = %s\n''' % (key, value) )
results.update(_a )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
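# Example invocation (illustrative; task, paths and model are placeholders):
#
#     python run_multiple_choice.py \
#         --task_name swag --model_name_or_path bert-base-uncased \
#         --data_dir ./data/swag --output_dir ./out \
#         --do_train --do_eval --max_seq_length 128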
| 34 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
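# Illustrative note (not part of the original __init__): with the lazy-module pattern
# above, heavy backends are only imported on first attribute access, e.g.
#
#   from transformers import ConvBertConfig    # resolves configuration_convbert lazily
#   from transformers import TFConvBertModel   # TensorFlow code is imported only here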
| 340 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
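# Illustrative note (not part of the original test file): the integration test above is
# gated behind @slow, so it only runs when slow tests are enabled, e.g.
#
#   RUN_SLOW=1 python -m pytest tests/models/mbart/test_modeling_tf_mbart.py -k batch_generation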
| 35 |
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``, recording even-sized subtrees."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # a subtree of even size can be cut away from its parent
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Count removable edges so every remaining component has an even node count."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
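# Worked example (illustrative): in the sample tree above, the subtrees rooted at 3
# ({3, 4}) and at 6 ({6, 8, 9, 10}) have even size, so edges (1, 3) and (1, 6) can be
# cut; the script therefore prints len(cuts) - 1 == 2.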
| 340 | 0 |
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1

    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod

    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# using Python operators:
print((a / b) % p == (a * b ** (p - 2)) % p)
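# Quick sanity check (illustrative, assuming the definitions above): the helper agrees
# with Python's built-in three-argument pow, which also computes (a ** n) % mod.
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)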
| 36 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 0 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a Flax checkpoint file into a PyTorch model."""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
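# Minimal usage sketch (model class and checkpoint path are assumptions, for
# illustration only):
#
#   from diffusers import UNet2DConditionModel
#
#   pt_model = UNet2DConditionModel(...)  # hypothetical instantiation matching the Flax weights
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "unet/flax_model.msgpack")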
| 37 |
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
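# For the sample data above the merged output is (illustrative check):
#   -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10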
| 340 | 0 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
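# Doctest-style examples (illustrative, assuming the functions above):
#
#   >>> bisect_left([0, 5, 7, 10, 15], 6)
#   2
#   >>> bisect_right([0, 5, 7, 10, 15], 5)
#   2
#   >>> binary_search([0, 5, 7, 10, 15], 10)
#   3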
| 38 |
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 340 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny", "roberta", checkpoint_path, precision="fp32", device="cuda:0" if torch.cuda.is_available() else "cpu", enable_fusion=enable_fusion, fusion_type="aff_2d" if enable_fusion else None, )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
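# Example invocation (file and directory names are placeholders, for illustration only):
#
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path ./HTSAT-tiny-checkpoint.pt --pytorch_dump_folder_path ./clap-hf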
| 39 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between `start_prompt` and `end_prompt`, as well as its location."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting a given task guide."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Check the model list in a task guide against the state of the library; update it if needed."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->", end_prompt="<!--End of the generated tip-->", )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
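# Typical invocations (run from the repository root, as noted at the top of the file):
#
#   python utils/check_task_guides.py                      # check only, raise on mismatch
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite the task guides in place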
| 340 | 0 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
__lowercase = """path-to-your-trained-model"""
__lowercase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
__lowercase = """A photo of sks dog in a bucket"""
__lowercase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
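# Note (illustrative): float16 weights assume a CUDA device; for a CPU-only run, load
# with the default dtype instead, e.g.
#
#   pipe = StableDiffusionPipeline.from_pretrained(model_id)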
| 40 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
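# Example run (checkpoint name chosen for illustration):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased --dump_path ./fast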
| 340 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
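# Usage sketch (checkpoint and inputs mirror the reader docstring; illustrative only):
#
#   from transformers import DPRReader, DPRReaderTokenizerFast
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love ?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )
#   outputs = model(**encoded_inputs)
#   predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)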
| 41 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
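# Example run via python-fire (dataset directory is an assumption, for illustration):
#
#   python save_len_file.py t5-small ./wmt_en_ro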
| 340 | 0 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups, )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        """simple docstring"""
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_model_various_embeddings( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
@slow
    def test_inference_no_head_absolute_embedding( self ):
        """simple docstring"""
        model = AlbertModel.from_pretrained('albert-base-v2' )
        input_ids = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
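# A note on the slice check above: the integration test compares only a 3x3
# window of the hidden states against reference values, and it uses
# torch.allclose with atol=1e-4 rather than exact equality, since low-order
# float bits differ across hardware and kernel versions.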
| 42 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class lowercase__ ( _UpperCAmelCase ):
a_ ="""xlnet"""
a_ =["""mems"""]
a_ ={
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=32000 , d_model=1024 , n_layer=24 , n_head=16 , d_inner=4096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-1_2 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , )-> int:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead." , FutureWarning , )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self )-> Dict:
        '''simple docstring'''
        logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value )-> Union[str, Any]:
        '''simple docstring'''
        raise NotImplementedError(
            F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 340 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ['''bert-base-uncased''', '''bert-base-cased''']
TINY_MODEL_CHECKPOINT = '''hf-internal-testing/tiny-bert-tf-only'''
if is_tf_available():
class lowerCamelCase_ ( tf.keras.Model ):
'''simple docstring'''
        def __init__( self , tokenizer) -> List[Any]:
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)
        def call( self , inputs) -> List[str]:
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self) -> Dict:
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint , use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            '''This is a straightforward English test sentence.''',
            '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
            '''Now we\'re going to add some Chinese: 一 二 三 一二三''',
            '''And some much more rare Chinese: 齉 堃 齉堃''',
            '''Je vais aussi écrire en français pour tester les accents''',
            '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
        ]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1]))
    def test_output_equivalence( self) -> Union[str, Any]:
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs , return_tensors='''tf''' , padding='''longest''')
                tf_outputs = tf_tokenizer(test_inputs)
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.int64) == tf_outputs[key]))
@slow
    def test_different_pairing_styles( self) -> Optional[int]:
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.int64) == separated_outputs[key]))
@slow
    def test_graph_mode( self) -> str:
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
    def test_export_for_inference( self) -> List[Any]:
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / '''saved.model'''
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1E-5)
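# Design note: test_graph_mode traces the in-graph tokenizer with tf.function,
# and test_export_for_inference round-trips it through a SavedModel; together
# they guard the main selling point of TFBertTokenizer: tokenization that can
# be compiled and shipped inside the exported graph instead of run in Python.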
| 43 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys( config , base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
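# The table built above is a pure key rewrite: timm-style parameter names
# ("module.blocks.{i}...") are mapped onto Hugging Face ViT names, and for a
# base model the leading "vit." prefix is stripped again since the weights
# attach directly to the bare encoder.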
def read_in_q_k_v( state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
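# Shape sketch (illustrative hidden size, not taken from any checkpoint): the
# fused qkv matrix of shape (3*H, H) is sliced row-wise into query, key and
# value blocks, in exactly the order used by the function above.
def _qkv_split_demo(hidden: int = 4) -> None:
    fused = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q = fused[:hidden, :]  # first H rows -> query
    k = fused[hidden : hidden * 2, :]  # middle H rows -> key
    v = fused[-hidden:, :]  # last H rows -> value
    assert torch.equal(torch.cat([q, k, v]), fused)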
def remove_classification_head_( state_dict ):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head( state_dict ):
    """simple docstring"""
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    """simple docstring"""
    config = ViTMSNConfig()
    config.num_labels = 1_000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
    else:
        expected_slice = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1e-4 )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 340 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__( self ):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution( self ):
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
    def fov( self ):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
    def get_image_coords( self ):
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode="""trunc""" ),
            ] , axis=1 , )
        return coords
    @property
    def camera_rays( self ):
        batch_size , *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape ) )
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        rays = self.get_camera_rays(coords )
        rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays
    def get_camera_rays( self , coords ):
        batch_size , *shape , n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size , -1 , 2 )
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2 )
        fracs = fracs.view(batch_size , -1 , 2 )
        directions = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1 , keepdim=True )
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )
    def resize_image( self , width , height ):
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=width , height=height , x_fov=self.x_fov , y_fov=self.y_fov , )
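# Layout note: camera_rays packs an (origin, unit direction) pair per pixel
# along dim=2, so the full ray bundle has shape
# [batch, n_views * height * width, 2, 3] after the final view above, and the
# directions are unit-norm thanks to the normalization in get_camera_rays.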
def create_pan_cameras( size : int ) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=20 ):
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) ,)
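# Usage sketch (assumed size): create_pan_cameras(64) yields 20 poses on a
# circle looking inward at the origin, so
# create_pan_cameras(64).camera_rays has shape [1, 20 * 64 * 64, 2, 3].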
| 44 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a_ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class lowercase__ ( _UpperCAmelCase ):
def __init__( self , **__UpperCAmelCase )-> List[str]:
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , images , **kwargs )-> int:
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs )-> List[str]:
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." )-> Optional[int]:
        '''simple docstring'''
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward( self , model_inputs )-> int:
        '''simple docstring'''
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs )-> Tuple:
        '''simple docstring'''
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
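# Usage sketch (assumed checkpoint, illustrative only):
#
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification",
#                       model="openai/clip-vit-base-patch32")
# classifier("cat.png", candidate_labels=["cat", "dog"],
#            hypothesis_template="This is a photo of {}.")
# # -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}]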
| 340 | 0 |
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
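    # Illustrative check (assumed example grid, not from the source): a 3x3
    # grid with a single wall in the centre has exactly two simple paths from
    # the top-left to the bottom-right corner, one along each border.
    example_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    assert depth_first_search(example_grid, 0, 0, set()) == 2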
| 45 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class lowercase__ ( _UpperCAmelCase, unittest.TestCase ):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self )-> Dict:
        '''simple docstring'''
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
        with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
            for token in vocab_tokens:
                fp.write(F"{token} {vocab_tokens[token]}\n" )
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs )-> Union[str, Any]:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer )-> Union[str, Any]:
        '''simple docstring'''
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer( self )-> Optional[Any]:
        '''simple docstring'''
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
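# Note on the expected ids above: BartPho pairs a multilingual SentencePiece
# model with a reduced monolingual vocabulary, so pieces absent from the
# monolingual file ("▁l" and "à" here) map to the <unk> id 3, which is why
# [4, 5, 6, 3, 3, 7, 8, 3] contains three 3s.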
| 340 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            F' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def should_ignore( name , ignore_keys ):
    '''simple docstring'''
    for key in ignore_keys:
        if key.endswith(""".*""" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split(""".*.""" )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights( orig_dict , hf_model , model_name ):
    '''simple docstring'''
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(F'Unsupported model: {model_name}' )
    for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(F'{name} was ignored' )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix , suffix = key.split(""".*.""" )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split(""".""" )[-2]
                    mapped_key = mapped_key.replace("""*""" , layer_index )
                if "weight_g" in name:
                    weight_type = """weight_g"""
                elif "weight_v" in name:
                    weight_type = """weight_v"""
                elif "weight_ih_l0" in name:
                    weight_type = """weight_ih_l0"""
                elif "weight_hh_l0" in name:
                    weight_type = """weight_hh_l0"""
                elif "bias_ih_l0" in name:
                    weight_type = """bias_ih_l0"""
                elif "bias_hh_l0" in name:
                    weight_type = """bias_hh_l0"""
                elif "weight_ih_l1" in name:
                    weight_type = """weight_ih_l1"""
                elif "weight_hh_l1" in name:
                    weight_type = """weight_hh_l1"""
                elif "bias_ih_l1" in name:
                    weight_type = """bias_ih_l1"""
                elif "bias_hh_l1" in name:
                    weight_type = """bias_hh_l1"""
                elif "bias" in name:
                    weight_type = """bias"""
                elif "weight" in name:
                    weight_type = """weight"""
                elif "running_mean" in name:
                    weight_type = """running_mean"""
                elif "running_var" in name:
                    weight_type = """running_var"""
                elif "num_batches_tracked" in name:
                    weight_type = """num_batches_tracked"""
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'Unused weights: {unused_weights}' )
@torch.no_grad()
def convert_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    '''simple docstring'''
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 3_20_00
        config.codebook_size = 20_48
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 4_80_00
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = """time_group_norm"""
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(F'Unknown model name: {model_name}' )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["""best_state"""]
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
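# Example invocation (assumed script name and paths, illustrative only):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz-hf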
| 46 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
a_ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowercase__ ( _UpperCAmelCase ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , )-> List[str]:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None )-> List[str]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None )-> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None )-> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
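# Worked example for create_token_type_ids_from_sequences above: for a pair
# (A, B) the mask is len([CLS] + A + [SEP]) zeros followed by len(B + [SEP])
# ones, which is the standard BERT segment-id convention.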
| 340 | 0 |
'''simple docstring'''
from collections import deque
class Process:
    def __init__( self , process_name: str , arrival_time: int , burst_time: int ) -> None:
        '''simple docstring'''
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__( self , number_of_queues: int , time_slices: list[int] , queue: deque[Process] , current_time: int , ) -> None:
        '''simple docstring'''
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()
    def calculate_sequence_of_finish_queue( self ) -> list[str]:
        '''simple docstring'''
        sequence = []
        for i in range(len(self.finish_queue ) ):
            sequence.append(self.finish_queue[i].process_name )
        return sequence
    def calculate_waiting_time( self , queue: list[Process] ) -> list[int]:
        '''simple docstring'''
        waiting_times = []
        for i in range(len(queue ) ):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times
    def calculate_turnaround_time( self , queue: list[Process] ) -> list[int]:
        '''simple docstring'''
        turnaround_times = []
        for i in range(len(queue ) ):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times
    def calculate_completion_time( self , queue: list[Process] ) -> list[int]:
        '''simple docstring'''
        completion_times = []
        for i in range(len(queue ) ):
            completion_times.append(queue[i].stop_time )
        return completion_times
    def calculate_remaining_burst_time_of_processes( self , queue: deque[Process] ) -> list[int]:
        '''simple docstring'''
        return [q.burst_time for q in queue]
    def update_waiting_time( self , process: Process ) -> int:
        '''simple docstring'''
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served( self , ready_queue: deque[Process] ) -> deque[Process]:
        '''simple docstring'''
        finished = deque()  # sequence deque of finished process
        while len(ready_queue ) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin( self , ready_queue: deque[Process] , time_slice: int ) -> tuple[deque[Process], deque[Process]]:
        '''simple docstring'''
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue ) ):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp )
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp )
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue( self ) -> deque[Process]:
        '''simple docstring'''
        for i in range(self.number_of_queues - 1 ):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue , self.time_slices[i] )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue )
        return self.finish_queue
if __name__ == "__main__":
import doctest
    P1 = Process("P1", 0, 5_3)
    P2 = Process("P2", 0, 1_7)
    P3 = Process("P3", 0, 6_8)
    P4 = Process("P4", 0, 2_4)
    number_of_queues = 3
    time_slices = [1_7, 2_5]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})
    P1 = Process("P1", 0, 5_3)
    P2 = Process("P2", 0, 1_7)
    P3 = Process("P3", 0, 6_8)
    P4 = Process("P4", 0, 2_4)
    number_of_queues = 3
    time_slices = [1_7, 2_5]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f'''waiting time:\
    \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}'''
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f'''completion time:\
    \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}'''
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f'''turnaround time:\
    \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}'''
    )
# print sequence of finished processes
print(
f'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
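    # Sanity check derived by stepping through the schedule above: with time
    # slices [17, 25], P2 finishes during the first round-robin pass, P4 during
    # the second, and P1 then P3 drain through the closing FCFS stage.
    assert mlfq.calculate_sequence_of_finish_queue() == ["P2", "P4", "P1", "P3"]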
| 47 |
__version__ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
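
# A minimal sketch of the core training loop these exports enable (model,
# optimizer and dataloader stand in for the user's own objects):
#
#     accelerator = Accelerator()
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch in dataloader:
#         loss = model(**batch).loss
#         accelerator.backward(loss)
#         optimizer.step()
#         optimizer.zero_grad()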
| 340 | 0 |
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    h = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(h)
    return root
class AVLtree:
    """An AVL tree: a self-balancing binary search tree."""

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(
        self,
    ) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
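
# A small, hand-checkable exercise of the rotation machinery above: inserting
# a strictly descending run forces a right rotation, leaving a balanced tree.
demo = AVLtree()
for value in (3, 2, 1):
    demo.insert(value)  # inserting 1 triggers right_rotation at node 3
print(demo.get_height())  # 2: re-rooted at 2, with leaves 1 and 3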
| 48 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """
    Check all inits in the repo define the same objects in both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """
    Returns the list of transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """
    Check all submodules of transformers are properly registered in the main init.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
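
# A sketch of the lazy-init layout the checks above validate (module and object
# names here are illustrative, not a real transformers submodule): the names
# listed per backend in `_import_structure` must match the ones imported under
# `TYPE_CHECKING`, or `analyze_results` reports a difference.
#
#     _import_structure = {"configuration_foo": ["FooConfig"]}
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         pass
#     else:
#         _import_structure["modeling_foo"] = ["FooModel"]
#
#     if TYPE_CHECKING:
#         from .configuration_foo import FooConfig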
| 340 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` near `starting_point` via Newton's method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f'{newton_raphson("exp(x) - 1", 10, precision=0.0_0_5)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
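
    # Quick sanity check of the routine above: the positive root of x**2 - 2
    # converges to sqrt(2) from a nearby starting point.
    root = newton_raphson("x**2 - 2", 1.5)
    print(abs(root**2 - 2) < 1e-9)  # True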
| 49 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """
    Find the maximum saving achieved by removing redundant edges from the
    network while keeping it connected.
    """
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split("\n")
        adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)

    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
| 340 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 5_12,
"""google/realm-cc-news-pretrained-encoder""": 5_12,
"""google/realm-cc-news-pretrained-scorer""": 5_12,
"""google/realm-cc-news-pretrained-openqa""": 5_12,
"""google/realm-orqa-nq-openqa""": 5_12,
"""google/realm-orqa-nq-reader""": 5_12,
"""google/realm-orqa-wq-openqa""": 5_12,
"""google/realm-orqa-wq-reader""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        r"""
        Encode a batch of text or text pairs where each batch entry holds multiple candidates. Candidates are always
        padded to `max_length` so they can be stacked into a single tensor.
        """
        # Always using a fixed sequence length to encode in order to stack candidates into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding `[CLS]` and `[SEP]` tokens to a sequence or sequence pair."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type IDs (0 for the first sequence, 1 for the second) from the given sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
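
# A brief sketch of the candidate-batching entry point above (downloading the
# checkpoint is assumed to succeed; the stacked shape is (batch, candidates, length)).
tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
batch = tokenizer.batch_encode_candidates(
    [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="np"
)
print(batch["input_ids"].shape)  # (1, 2, 10)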
| 50 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """
    Count the perimeters p <= limit that admit exactly one integer-sided
    right triangle, generated via Euclid's formula.
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"{solution() = }")
| 340 | 0 |
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter implementing the direct form 1 difference equation."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set the a and b coefficients; a_0 defaults to 1.0 when omitted."""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Feed one input sample through the filter and return the output sample."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
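
# A minimal use of IIRFilter above: the first-order low-pass
# y[n] = 0.5*x[n] + 0.5*y[n-1], i.e. a = [1.0, -0.5], b = [0.5, 0.0].
# A unit step settles geometrically toward 1.0.
lowpass = IIRFilter(1)
lowpass.set_coefficients([1.0, -0.5], [0.5, 0.0])
out = 0.0
for _ in range(5):
    out = lowpass.process(1.0)
print(out)  # 0.96875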
| 51 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    r"""
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
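
# Sketch of driving the processor above (assumes the "microsoft/layoutxlm-base"
# checkpoint, a PIL `document` image, and Tesseract installed for apply_ocr):
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(document, return_tensors="pt")
#     print(encoding.keys())  # input_ids, attention_mask, bbox, image, ...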
| 340 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """The variance-preserving stochastic differential equation (VP-SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
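
# A shape-level sketch of one reverse step above, driven with a dummy score of
# zeros (no trained model involved); run in the context of this module, and
# note `t` must be a tensor so `t**2` and `.flatten()` work.
sched = ScoreSdeVpScheduler()
sched.set_timesteps(10)
x = torch.randn(1, 3, 8, 8)
x, x_mean = sched.step_pred(torch.zeros_like(x), x, sched.timesteps[0])
print(x.shape, x_mean.shape)  # torch.Size([1, 3, 8, 8]) twice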
| 52 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
def is_power_of_two(number: int) -> bool:
    """Check whether `number` is a power of two using the n & (n - 1) bit trick."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
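
    # Why the n & (n - 1) trick above works: subtracting 1 flips the lowest set
    # bit and everything below it, so the AND is zero only when one bit is set.
    for n in (1, 2, 3, 4, 12, 16):
        print(n, bin(n), is_power_of_two(n))  # True only for 1, 2, 4 and 16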
| 53 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """
    Given the numerical coefficients a, b and c, calculate the roots of
    a * x * x + b * x + c = 0, returning real parts when the roots are real.
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
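
    # Cross-check of the closed form above: x**2 - 3x + 2 factors as
    # (x - 1)(x - 2), while a negative discriminant yields a conjugate pair.
    print(quadratic_roots(1, -3, 2))  # (2.0, 1.0)
    print(quadratic_roots(1, 0, 1))   # roots +/- 1j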
| 340 | 0 |
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 54 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """
    Build and simulate the quantum Fourier transform circuit on
    `number_of_qubits` qubits, returning the measured counts.
    """
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
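
    # The QFT of |00...0> is a uniform superposition, so the counts returned
    # above should be roughly even across all 2**n basis states.
    counts = quantum_fourier_transform(2)
    print(counts)  # e.g. {'00': ~2500, '01': ~2500, '10': ~2500, '11': ~2500}
    print(sum(counts.values()))  # 10000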
| 340 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters( state_dict : Dict ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict( state_dict : List[Any] , codebook_state_dict : Union[str, Any] ):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        # rename the original FLAVA keys to the Hugging Face layout, chaining the
        # replacements so that every rule is applied to the same key
        key = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" )
        key = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" )
        key = key.replace("heads.cmd.itm_head.cls" , "itm_head" )
        key = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" )
        key = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" )
        key = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" )
        key = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" )
        key = key.replace("mm_text_projection" , "flava.text_to_mm_projection" )
        key = key.replace("mm_image_projection" , "flava.image_to_mm_projection" )
        key = key.replace("image_encoder.module" , "flava.image_model" )
        key = key.replace("text_encoder.module" , "flava.text_model" )
        key = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" )
        key = key.replace("mm_encoder.module" , "flava.multimodal_model" )
        key = key.replace("text_projection" , "flava.text_projection" )
        key = key.replace("image_projection" , "flava.image_projection" )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
return upgrade
@torch.no_grad()
def convert_flava_checkpoint( checkpoint_path : str , codebook_path : int , pytorch_dump_folder_path : Union[str, Any] , config_path : Dict=None ):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path , None , save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path , map_location="cpu" )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path , map_location="cpu" )
    hf_state_dict = upgrade_state_dict(state_dict , codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1E-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
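# Example invocation, assuming this script is saved as convert_flava.py
# (the paths are hypothetical and shown for illustration only):
#   python convert_flava.py \
#       --checkpoint_path flava_full.ckpt \
#       --codebook_path flava_codebook.ckpt \
#       --pytorch_dump_folder_path ./flava-hf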
| 55 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum ):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin ):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self , image_processor=None , tokenizer=None , **kwargs )-> str:
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2" )
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased" )
        super().__init__(image_processor , tokenizer )
    def __call__(self , text=None , images=None , return_tensors=None , **kwargs )-> List[Any]:
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process." )
        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self , sequences )-> Optional[int]:
        '''simple docstring'''
        char_preds , bpe_preds , wp_preds = sequences
        batch_size = char_preds.size(0 )
        char_strs , char_scores = self._decode_helper(char_preds , "char" )
        bpe_strs , bpe_scores = self._decode_helper(bpe_preds , "bpe" )
        wp_strs , wp_scores = self._decode_helper(wp_preds , "wp" )
        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self , pred_logits , format )-> Optional[int]:
        '''simple docstring'''
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # char "[s]" token id
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2  # bpe "#" token id
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102  # wordpiece "[SEP]" token id
            eos_str = "[SEP]"
        else:
            raise ValueError(F"Format {format} is not supported." )
        dec_strs , conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        _ , preds_index = pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
        preds_index = preds_index.view(-1 , batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob , _ = torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )
        return dec_strs, conf_scores
    def char_decode(self , sequences )-> Optional[Any]:
        '''simple docstring'''
        decode_strs = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs

    def bpe_decode(self , sequences )-> Optional[Any]:
        '''simple docstring'''
        return self.bpe_tokenizer.batch_decode(sequences )

    def wp_decode(self , sequences )-> Optional[int]:
        '''simple docstring'''
        decode_strs = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs
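# A minimal usage sketch (assumption: a published checkpoint such as
# "alibaba-damo/mgp-str-base" provides both the image processor and the
# character tokenizer; `image` is a PIL image you supply):
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   # after a forward pass of the MGP-STR model:
#   # texts = processor.batch_decode(outputs.logits)["generated_text"]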
| 340 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    '''Creates a random float32 tensor of the requested shape as nested lists.'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase ):
    def __init__(self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=16_000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self ):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self ):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self )

    def _check_zero_mean_unit_variance(self , input_vector ):
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1e-3 ) )

    def test_call(self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3 ) )
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs , return_tensors='''np''' ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors='''np''' ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feat_extract(speech_inputs , return_tensors='''np''' ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors='''np''' ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
    def test_zero_mean_unit_variance_normalization_np(self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        paddings = ['''longest''', '''max_length''', '''do_not_pad''']
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths , paddings ):
            processed = feat_extract(speech_inputs , padding=padding , max_length=max_length , return_tensors='''np''' )
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self.assertTrue(input_values[0][800:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )

    def test_zero_mean_unit_variance_normalization(self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lengths = range(800 , 1400 , 200 )
        speech_inputs = [floats_list((1, x) )[0] for x in lengths]
        paddings = ['''longest''', '''max_length''', '''do_not_pad''']
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths , paddings ):
            processed = feat_extract(speech_inputs , max_length=max_length , padding=padding )
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1000 , padding='''max_length''' , return_tensors='''np''' )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1000 , padding='''longest''' , return_tensors='''np''' )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000) )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=2000 , padding='''longest''' , return_tensors='''np''' )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200) )
    @require_torch
    def test_double_precision_pad(self ):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self ):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id )
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id )
            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
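# These tests are collected by pytest; in the transformers test suite the @slow
# test at the bottom only runs when the RUN_SLOW=1 environment variable is set.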
| 56 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
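# With this pattern, importing the package stays cheap: _LazyModule replaces the
# module object in sys.modules, and the heavy PyTorch/TensorFlow submodules listed
# in _import_structure are only imported on first attribute access.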
| 340 | 0 |
"""simple docstring"""
def solution(n = 6008_5147_5143 ):
    '''simple docstring'''
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(f'''{solution() = }''')
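# Worked example: for n = 13195 the trial-division loop strips the factors
# 5, 7, 13 and finally 29, so solution(13195) returns 29; the default argument
# reproduces the Project Euler #3 input.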
| 57 |
from collections import defaultdict
def dfs(start: int ) -> int:
    """simple docstring"""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret


def even_tree() -> None:
    """simple docstring"""
    dfs(1 )
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
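# For the sample tree above (10 vertices, 9 edges) the program prints 2: `cuts`
# collects every vertex whose subtree has an even number of nodes (the root
# included), and cutting the edge above each non-root entry leaves only
# even-sized components.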
| 340 | 0 |
'''simple docstring'''
def solution(n: int = 400_0000 ) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 58 |
import requests
from bs4 import BeautifulSoup
def _a ( UpperCamelCase_ : str = "AAPL" ) -> str:
"""simple docstring"""
lowerCAmelCase__ = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
lowerCAmelCase__ = BeautifulSoup(requests.get(UpperCamelCase_ ).text , "html.parser" )
lowerCAmelCase__ = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 0 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase ):
    def get_file_format(self , seed , shape ):
        '''simple docstring'''
        return f"""gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy"""

    def tearDown(self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()

    def get_latents(self , seed=0 , shape=(4, 4, 64, 64) , fp16=False ):
        '''simple docstring'''
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return image

    def get_unet_model(self , fp16=False , model_id="CompVis/stable-diffusion-v1-4" ):
        '''simple docstring'''
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model , params = FlaxUNet2DConditionModel.from_pretrained(
            model_id , subfolder="unet" , dtype=dtype , revision=revision )
        return model, params

    def get_encoder_hidden_states(self , seed=0 , shape=(4, 77, 7_68) , fp16=False ):
        '''simple docstring'''
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 10_00, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self , seed , timestep , expected_slice ):
        '''simple docstring'''
        model , params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fp16=True )
        latents = self.get_latents(seed , fp16=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , fp16=True )
        sample = model.apply(
            {"params": params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 10_00, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self , seed , timestep , expected_slice ):
        '''simple docstring'''
        model , params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fp16=True )
        latents = self.get_latents(seed , shape=(4, 4, 96, 96) , fp16=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , shape=(4, 77, 10_24) , fp16=True )
        sample = model.apply(
            {"params": params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
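# Note on tolerances: the fp16 branches load the "bf16" model revision and compare
# Flax bfloat16 outputs against slices precomputed with the PyTorch float16 UNet,
# which is why the assertions above use a relatively loose atol of 1e-2.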
| 59 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self , ints: Iterable[int] )-> None:
        '''simple docstring'''
        self.head: Node | None = None
        for i in sorted(ints , reverse=True ):
            self.head = Node(i , self.head )

    def __iter__(self )-> Iterator[int]:
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self )-> int:
        '''simple docstring'''
        return sum(1 for _ in self )

    def __str__(self )-> str:
        '''simple docstring'''
        return " -> ".join([str(node ) for node in self] )


def merge_lists(sll_one: SortedLinkedList , sll_two: SortedLinkedList ) -> SortedLinkedList:
    """simple docstring"""
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
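# Example (a hypothetical small input, with the expected string shown as a comment):
#   print(merge_lists(SSL((3, 1)), SSL((4, 2))))  # -> 1 -> 2 -> 3 -> 4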
| 340 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=False )
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config )
        tokenizer_2 = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=False )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''text_encoder_2''': text_encoder_2,
            '''tokenizer_2''': tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 5.0,
            '''output_type''': '''numpy''',
            '''strength''': 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_attention_slicing_forward_pass(self ):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )

    def test_inference_batch_single_identical(self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )

    def test_save_load_optional_components(self ):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self ):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['''this is a negative prompt''']
        inputs['''negative_prompt'''] = negative_prompt
        inputs['''prompt'''] = 3 * [inputs['''prompt''']]
        output = sd_pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['''this is a negative prompt''']
        prompt = 3 * [inputs.pop('''prompt''' )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self , device , generator_device='''cpu''' , dtype=torch.float32 , seed=0 ):
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            '''prompt''': '''a photograph of an astronaut riding a horse''',
            '''latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_stable_diffusion(self ):
        pipe = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
        assert np.abs(image_slice - expected_slice ).max() < 7e-3
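# The slow test pins the initial latents from a seeded NumPy RandomState and uses
# only 3 inference steps, so the expected_slice comparison stays deterministic
# across runs on the same GPU.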
| 60 |
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
    env_name = '''hopper-medium-v2'''
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        '''bglick13/hopper-medium-v2-value-function-hor32''',
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                F" {total_score}"
            )
            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 340 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    '''simple docstring'''

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="""Translation""" , init=False , repr=False )

    def __call__( self ):
        """simple docstring"""
        return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )

    def flatten( self ):
        """simple docstring"""
        from .features import Value

        return {k: Value("string" ) for k in sorted(self.languages )}
@dataclass
class TranslationVariableLanguages:
    '''simple docstring'''

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="""TranslationVariableLanguages""" , init=False , repr=False )

    def __post_init__( self ):
        """simple docstring"""
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None

    def __call__( self ):
        """simple docstring"""
        return pa.struct({"language": pa.list_(pa.string() ), "translation": pa.list_(pa.string() )} )

    def encode_example( self , translation_dict ):
        """simple docstring"""
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                F"""Some languages in example ({", ".join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({", ".join(lang_set )}).""" )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}

    def flatten( self ):
        """simple docstring"""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string" ) ),
            "translation": Sequence(Value("string" ) ),
        }
}
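# A minimal usage sketch (the example sentences are made up):
#   Translation(languages=["en", "fr"]) stores one string per language, e.g.
#       {"en": "the cat", "fr": "le chat"}
#   TranslationVariableLanguages(languages=["en", "fr"]).encode_example(
#       {"en": "the cat", "fr": ["le chat", "la chatte"]}
#   )  # -> {"language": ("en", "fr", "fr"),
#      #     "translation": ("the cat", "la chatte", "le chat")}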
| 61 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def _find_text_in_file(filename , start_prompt , end_prompt ):
    """simple docstring"""
    with open(filename , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide ):
    """simple docstring"""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task(task_guide , overwrite=False ):
    """simple docstring"""
    current_list , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , "w" , encoding="utf-8" , newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
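# Typical usage, mirroring the repo's other consistency checkers:
#   python utils/check_task_guides.py                      # check only, raise on drift
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite the doc lists in place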
| 340 | 0 |
import os
def solution():
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('"' , '' ).split(',' )
    names.sort()
    total_score = 0
    name_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
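# Worked example from Project Euler #22: after sorting, COLIN is the 938th name
# and scores 3 + 15 + 12 + 9 + 14 = 53, so it contributes 938 * 53 = 49714 to the total.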
| 62 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name , checkpoint_name , dump_path , force_download ):
    """simple docstring"""
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + "Fast" )}
    logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
        for checkpoint in checkpoint_names:
            logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split("/" )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F"=> File names {file_names}" )
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    os.remove(file_name )
                    logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
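# Example invocation, assuming this script is saved as
# convert_slow_tokenizers_checkpoints_to_fast.py (the dump path is hypothetical):
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased \
#       --dump_path ./fast-tokenizers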
| 340 | 0 |
'''simple docstring'''
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 63 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(
    tokenizer_name , data_dir , max_source_length=1_024 , max_target_length=1_024 , consider_target=False , **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="train" , **kwargs )
    pad = tok.pad_token_id

    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["labels"].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens

    train_lens = get_lens(train_ds )
    val_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="val" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
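# Example invocation via python-fire (hypothetical data_dir layout with
# train.source/train.target and val.source/val.target files):
#   python save_len_file.py t5-small ./wmt_en_ro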
| 340 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    """simple docstring"""
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        F" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights_wav2vec2(fairseq_model , hf_model ):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        elif name.split(""".""" )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"Unused weights: {unused_weights}" )
    return proj_weight
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """simple docstring"""
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb(emb ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path ):
    """simple docstring"""
    with open(dict_path , """r""" , encoding="""utf-8""" ) as f:
        lines = f.readlines()
        words = [line.split(""" """ )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        """<s>""": 0,
        """<pad>""": 1,
        """</s>""": 2,
        """<unk>""": 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
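# Sketch of the resulting layout, assuming a fairseq dict.txt whose first two
# entries are "hello 10" and "world 7" (hypothetical words and counts):
#   {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}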
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
    """simple docstring"""
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    # set output linear layer
    unexpected_keys.remove("""embed_out""" )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
    logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
    vocab_dict = create_vocab_dict(dict_path )
    with open(os.path.join(pytorch_dump_folder_path , """vocab.json""" ) , """w""" ) as fp:
        json.dump(vocab_dict , fp )
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , """vocab.json""" ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config["""pad_token_id"""] = tokenizer.pad_token_id
    config["""bos_token_id"""] = tokenizer.bos_token_id
    config["""eos_token_id"""] = tokenizer.eos_token_id
    config["""tokenizer_class"""] = """speech_to_text_2"""
    config["""feature_extractor_type"""] = """wav2vec2"""
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_02_24, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
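# Example invocation (the script name and all paths are placeholders):
#   python convert_wav2vec2_with_s2t2_decoder.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/fairseq/dict.txt \
#       --pytorch_dump_folder_path ./converted-model
# The remaining flags fall back to the argparse defaults defined above.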
| 64 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
    '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class lowercase__ ( _UpperCAmelCase ):
a_ ="""xlnet"""
a_ =["""mems"""]
a_ ={
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=32000 , d_model=1024 , n_layer=24 , n_head=16 , d_inner=4096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-1_2 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , )-> int:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead." , FutureWarning , )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self )-> Dict:
        '''simple docstring'''
        logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , __UpperCAmelCase )-> Union[str, Any]:
        '''simple docstring'''
        raise NotImplementedError(
            F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 340 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_50, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class A ( unittest.TestCase ):
    def setUp(self : int ) -> Optional[Any]:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=__UpperCAmelCase , )
assert hasattr(self , "env" )
    def create_estimator(self : List[Any] , instance_count : Optional[int]=1 ) -> Dict:
"""simple docstring"""
return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
    def save_results_as_csv(self : Optional[Any] , job_name : Tuple ) -> Optional[int]:
        """simple docstring"""
        TrainingJobAnalytics(job_name ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
    def test_glue(self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
UpperCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __UpperCAmelCase )
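# These tests are skipped unless a release is being prepared; locally they can
# be exercised with something like (command illustrative):
#   TEST_SAGEMAKER=True pytest tests/sagemaker -k single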
| 65 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config , base_model=False ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
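# For i == 0 the loop above emits pairs such as
# ("module.blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight"),
# and with base_model=True the "vit." prefix is stripped from every destination key.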
def read_in_q_k_v(state_dict , config , base_model=False ) -> List[str]:
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
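# The fused qkv matrix is split into equal thirds: rows [0, H) -> query,
# [H, 2H) -> key, [2H, 3H) -> value, where H = config.hidden_size.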
def remove_classification_head_(state_dict ) -> Tuple:
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head(state_dict ) -> Optional[int]:
    """simple docstring"""
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ) -> List[Any]:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url , pytorch_dump_folder_path ) -> Tuple:
    """simple docstring"""
    config = ViTMSNConfig()
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCAmelCase__ = 384
lowerCAmelCase__ = 1_536
lowerCAmelCase__ = 6
elif "l16" in checkpoint_url:
lowerCAmelCase__ = 1_024
lowerCAmelCase__ = 4_096
lowerCAmelCase__ = 24
lowerCAmelCase__ = 16
lowerCAmelCase__ = 0.1
elif "b4" in checkpoint_url:
lowerCAmelCase__ = 4
elif "l7" in checkpoint_url:
lowerCAmelCase__ = 7
lowerCAmelCase__ = 1_024
lowerCAmelCase__ = 4_096
lowerCAmelCase__ = 24
lowerCAmelCase__ = 16
lowerCAmelCase__ = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
lowerCAmelCase__ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCamelCase_ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 340 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
__a = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 5_12,
"albert-large-v1": 5_12,
"albert-xlarge-v1": 5_12,
"albert-xxlarge-v1": 5_12,
"albert-base-v2": 5_12,
"albert-large-v2": 5_12,
"albert-xlarge-v2": 5_12,
"albert-xxlarge-v2": 5_12,
}
__a = "▁"
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model )
    def get_vocab( self ) -> Dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> List[str]:
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ) -> Dict:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ) -> str:
        if self.remove_space:
            outputs = """ """.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("""NFKD""" , outputs )
            outputs = """""".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text: str ) -> List[str]:
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , """""" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
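    # Example of the digit/comma heuristic above: a piece like "9," is re-split
    # so the trailing comma becomes its own piece, matching how SentencePiece
    # would have segmented "9" without the comma attached.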
    def _convert_token_to_id( self , token ) -> Any:
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ) -> str:
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 66 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a_ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class lowercase__ ( _UpperCAmelCase ):
def __init__( self , **__UpperCAmelCase )-> List[str]:
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , images , **kwargs )-> int:
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs )-> List[str]:
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." )-> Optional[int]:
        '''simple docstring'''
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward( self , model_inputs )-> int:
        '''simple docstring'''
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs )-> Tuple:
        '''simple docstring'''
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
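# Minimal usage sketch (the model name is illustrative; any CLIP-style
# checkpoint works with this pipeline):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])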
| 340 | 0 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__UpperCAmelCase =TypeVar("KT")
__UpperCAmelCase =TypeVar("VT")
class a__ ( Generic[KT, VT] ):
    def __init__( self , key : KT | str = "root" , value : VT | None = None ):
        """simple docstring"""
        self.key = key
        self.value = value
        self.forward = []
def __repr__( self : Any ):
"""simple docstring"""
return f"""Node({self.key}: {self.value})"""
    @property
    def level( self ):
        """simple docstring"""
        return len(self.forward )
class a__ ( Generic[KT, VT] ):
    def __init__( self , p : float = 0.5 , max_level : int = 16 ):
        """simple docstring"""
        self.head = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__( self ):
        """simple docstring"""
        items = list(self )
        if len(items ) == 0:
            return f"""SkipList(level={self.level})"""
        label_size = max((len(str(item ) ) for item in items) , default=4 )
        label_size = max(label_size , 4 ) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"""[{node.key}]""".ljust(label_size , '''-''' ) + '''* ''' * len(forwards ) )
        lines.append(''' ''' * label_size + '''| ''' * len(forwards ) )
        while len(node.forward ) != 0:
            node = node.forward[0]
            lines.append(
                f"""[{node.key}]""".ljust(label_size , '''-''' )
                + ''' '''.join(str(n.key ) if n.key == node.key else '''|''' for n in forwards ) )
            lines.append(''' ''' * label_size + '''| ''' * len(forwards ) )
            forwards = node.forward
        lines.append('''None'''.ljust(label_size ) + '''* ''' * len(forwards ) )
        return f"""SkipList(level={self.level})\n""" + "\n".join(lines )
    def __iter__( self ):
        """simple docstring"""
        node = self.head
        while len(node.forward ) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level( self ):
        """simple docstring"""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
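    # With p = 0.5 the level returned above is (truncated) geometrically
    # distributed: P(level >= k) = p**(k - 1), so on average each node carries
    # about 1/(1 - p) = 2 forward pointers and search stays O(log n) in expectation.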
    def _locate_node( self , key ):
        """simple docstring"""
        update_vector = []
        node = self.head
        for i in reversed(range(self.level ) ):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node )
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward ) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete( self , key : KT ):
        """simple docstring"""
        node , update_vector = self._locate_node(key )
        if node is not None:
            for i, update_node in enumerate(update_vector ):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert( self , key : KT , value : VT ):
        """simple docstring"""
        node , update_vector = self._locate_node(key )
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , level ):
                    update_vector.append(self.head )
                self.level = level
            new_node = Node(key , value )
            for i, update_node in enumerate(update_vector[:level] ):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i] )
                if update_node.level < i + 1:
                    update_node.forward.append(new_node )
                else:
                    update_node.forward[i] = new_node
    def find( self , key : VT ):
        """simple docstring"""
        node , _ = self._locate_node(key )
        if node is not None:
            return node.value
        return None
def __lowerCAmelCase ( ) -> Optional[Any]:
    skip_list = SkipList()
    skip_list.insert('''Key1''' , 3 )
    skip_list.insert('''Key2''' , 12 )
    skip_list.insert('''Key3''' , 41 )
    skip_list.insert('''Key4''' , -19 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def __lowerCAmelCase ( ) -> Any:
    skip_list = SkipList()
    skip_list.insert('''Key1''' , 10 )
    skip_list.insert('''Key1''' , 12 )
    skip_list.insert('''Key5''' , 7 )
    skip_list.insert('''Key7''' , 10 )
    skip_list.insert('''Key10''' , 5 )
    skip_list.insert('''Key7''' , 7 )
    skip_list.insert('''Key5''' , 5 )
    skip_list.insert('''Key10''' , 10 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values ) != 4:
        print()
    assert len(all_values ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def __lowerCAmelCase ( ) -> Optional[Any]:
    skip_list = SkipList()
assert skip_list.find('''Some key''' ) is None
def __lowerCAmelCase ( ) -> str:
    skip_list = SkipList()
skip_list.insert('''Key2''' , 20 )
assert skip_list.find('''Key2''' ) == 20
skip_list.insert('''Some Key''' , 10 )
skip_list.insert('''Key2''' , 8 )
skip_list.insert('''V''' , 13 )
assert skip_list.find('''Y''' ) is None
assert skip_list.find('''Key2''' ) == 8
assert skip_list.find('''Some Key''' ) == 10
assert skip_list.find('''V''' ) == 13
def __lowerCAmelCase ( ) -> int:
    skip_list = SkipList()
skip_list.delete('''Some key''' )
assert len(skip_list.head.forward ) == 0
def __lowerCAmelCase ( ) -> int:
    skip_list = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''Key2''' ) is None
def __lowerCAmelCase ( ) -> List[str]:
    skip_list = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) == 14
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def __lowerCAmelCase ( ) -> List[Any]:
    skip_list = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 1_42 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''X''' )
    def traverse_keys(node ):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __lowerCAmelCase ( ) -> Dict:
    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )
    skip_list = SkipList()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def __lowerCAmelCase ( ) -> int:
for _ in range(1_00 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __lowerCAmelCase ( ) -> List[str]:
    skip_list = SkipList()
    skip_list.insert(2 , '''2''' )
    skip_list.insert(4 , '''4''' )
    skip_list.insert(6 , '''4''' )
    skip_list.insert(4 , '''5''' )
    skip_list.insert(8 , '''4''' )
    skip_list.insert(9 , '''4''' )
    skip_list.delete(4 )
    print(skip_list )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 67 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class lowercase__ ( _UpperCAmelCase, unittest.TestCase ):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self )-> Dict:
'''simple docstring'''
super().setUp()
lowerCAmelCase__ = ["▁This", "▁is", "▁a", "▁t", "est"]
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = {"unk_token": "<unk>"}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(F"{token} {vocab_tokens[token]}\n" )
lowerCAmelCase__ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs )-> Union[str, Any]:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer )-> Union[str, Any]:
        '''simple docstring'''
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer( self )-> Optional[Any]:
        '''simple docstring'''
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        text = "This is a là test"
        output_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , output_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 340 | 0 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase__ = """"""
lowerCAmelCase__ = """"""
lowerCAmelCase__ = """"""
lowerCAmelCase__ = """"""
def get_all_tweets(screen_name: str ) -> None:
    '''simple docstring'''
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=2_0_0 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F'getting tweets before {oldest}' )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=2_0_0 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F'...{len(alltweets )} tweets downloaded so far' )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F'new_{screen_name}_tweets.csv' , "w" ) as f:
        writer = csv.writer(f )
        writer.writerow(["id", "created_at", "text"] )
        writer.writerows(outtweets )
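# Note: Twitter's standard v1.1 user_timeline endpoint only serves roughly the
# most recent 3,200 tweets per account, so the pagination loop above always
# terminates even for prolific users.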
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 68 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowercase__ ( _UpperCAmelCase ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , )-> List[str]:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None )-> List[int]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None )-> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
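    # e.g. for a sequence pair the token type ids line up as
    #   [CLS] A1 A2 [SEP] B1 B2 [SEP]
    #    0    0  0   0    1  1   1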
    def save_vocabulary( self , save_directory , filename_prefix = None )-> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 340 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "beit"
    def __init__( self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ) -> List[str]:
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class UpperCamelCase ( lowerCAmelCase__ ):
    torch_onnx_minimum_version = version.parse("1.11" )
@property
    def inputs( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
    def atol_for_validation( self) -> float:
return 1e-4
| 69 |
a_ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 340 | 0 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 70 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(r'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(r'''^\s*else:''')
def find_backend(line ) -> List[str]:
    """simple docstring"""
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def _a ( UpperCamelCase_ : Optional[int] ) -> Tuple:
"""simple docstring"""
with open(UpperCamelCase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = 0
while line_index < len(UpperCamelCase_ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(UpperCamelCase_ ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase__ = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(UpperCamelCase_ ):
lowerCAmelCase__ = _re_one_line_import_struct.search(UpperCamelCase_ ).groups()[0]
            lowerCAmelCase__ = re.findall(r"\[([^\]]+)\]" , UpperCamelCase_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
lowerCAmelCase__ = _re_import_struct_key_value.search(UpperCamelCase_ )
if single_line_import_search is not None:
lowerCAmelCase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(UpperCamelCase_ ) > 0]
objects.extend(UpperCamelCase_ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase__ = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
lowerCAmelCase__ = lines[line_index]
if _re_import_struct_add_one.search(UpperCamelCase_ ) is not None:
objects.append(_re_import_struct_add_one.search(UpperCamelCase_ ).groups()[0] )
elif _re_import_struct_add_many.search(UpperCamelCase_ ) is not None:
lowerCAmelCase__ = _re_import_struct_add_many.search(UpperCamelCase_ ).groups()[0].split(", " )
lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(UpperCamelCase_ ) > 0]
objects.extend(UpperCamelCase_ )
elif _re_between_brackets.search(UpperCamelCase_ ) is not None:
lowerCAmelCase__ = _re_between_brackets.search(UpperCamelCase_ ).groups()[0].split(", " )
lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(UpperCamelCase_ ) > 0]
objects.extend(UpperCamelCase_ )
elif _re_quote_object.search(UpperCamelCase_ ) is not None:
objects.append(_re_quote_object.search(UpperCamelCase_ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase__ = []
while (
line_index < len(UpperCamelCase_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
lowerCAmelCase__ = lines[line_index]
lowerCAmelCase__ = _re_import.search(UpperCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase__ = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(UpperCamelCase_ ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
lowerCAmelCase__ = lines[line_index]
lowerCAmelCase__ = _re_import.search(UpperCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] ) -> str:
"""simple docstring"""
def find_duplicates(UpperCamelCase_ : str ):
return [k for k, v in collections.Counter(UpperCamelCase_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase__ = []
for key in import_dict_objects.keys():
lowerCAmelCase__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
lowerCAmelCase__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCAmelCase__ = "base imports" if key == "none" else F"{key} backend"
errors.append(F"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F" {a} in _import_structure but not in TYPE_HINT." )
return errors
def _a ( ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = []
for root, _, files in os.walk(UpperCamelCase_ ):
if "__init__.py" in files:
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , "__init__.py" )
lowerCAmelCase__ = parse_init(UpperCamelCase_ )
if objects is not None:
lowerCAmelCase__ = analyze_results(*UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
lowerCAmelCase__ = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append("\n".join(UpperCamelCase_ ) )
if len(UpperCamelCase_ ) > 0:
raise ValueError("\n\n".join(UpperCamelCase_ ) )
def _a ( ) -> str:
"""simple docstring"""
lowerCAmelCase__ = []
for path, directories, files in os.walk(UpperCamelCase_ ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(UpperCamelCase_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(UpperCamelCase_ ) / folder).glob("*.py" ) ) ) == 0:
continue
lowerCAmelCase__ = str((Path(UpperCamelCase_ ) / folder).relative_to(UpperCamelCase_ ) )
lowerCAmelCase__ = short_path.replace(os.path.sep , "." )
submodules.append(UpperCamelCase_ )
for fname in files:
if fname == "__init__.py":
continue
lowerCAmelCase__ = str((Path(UpperCamelCase_ ) / fname).relative_to(UpperCamelCase_ ) )
lowerCAmelCase__ = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(UpperCamelCase_ )
return submodules
a_ = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def _a ( ) -> int:
"""simple docstring"""
lowerCAmelCase__ = importlib.util.spec_from_file_location(
"transformers" , os.path.join(UpperCamelCase_ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowerCAmelCase__ = spec.loader.load_module()
lowerCAmelCase__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(UpperCamelCase_ ) > 0:
lowerCAmelCase__ = "\n".join(F"- {module}" for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F"{list_of_modules}\n"
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 340 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __A ( a , a , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : Tuple =CycleDiffusionPipeline
UpperCamelCase__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""negative_prompt""",
"""height""",
"""width""",
"""negative_prompt_embeds""",
}
UpperCamelCase__ : Any =PipelineTesterMixin.required_optional_params - {"""latents"""}
UpperCamelCase__ : Union[str, Any] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""} )
UpperCamelCase__ : Optional[Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ : Union[str, Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCamelCase : List[Any] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__UpperCamelCase : int =DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=1000 , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , )
torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCamelCase : List[Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__UpperCamelCase : Any =CLIPTextModel(lowerCamelCase__ )
__UpperCamelCase : Any =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase : str ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
__UpperCamelCase : List[str] =image / 2 + 0.5
if str(lowerCamelCase__ ).startswith('mps' ):
__UpperCamelCase : int =torch.manual_seed(lowerCamelCase__ )
else:
__UpperCamelCase : Tuple =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__UpperCamelCase : Optional[Any] ={
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : Tuple =self.get_dummy_components()
__UpperCamelCase : Optional[Any] =CycleDiffusionPipeline(**lowerCamelCase__ )
__UpperCamelCase : int =pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : Dict =pipe(**lowerCamelCase__ )
__UpperCamelCase : int =output.images
__UpperCamelCase : Optional[int] =images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCamelCase : Any =np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =self.get_dummy_components()
for name, module in components.items():
if hasattr(lowerCamelCase__ , 'half' ):
__UpperCamelCase : List[str] =module.half()
__UpperCamelCase : int =CycleDiffusionPipeline(**lowerCamelCase__ )
__UpperCamelCase : Any =pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : int =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =pipe(**lowerCamelCase__ )
__UpperCamelCase : Dict =output.images
__UpperCamelCase : int =images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCamelCase : Optional[int] =np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowercase ( self ):
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def __lowercase ( self ):
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def __lowercase ( self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __lowercase ( self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def __lowercase ( self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
__UpperCamelCase : Union[str, Any] =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
__UpperCamelCase : Union[str, Any] =init_image.resize((512, 512) )
__UpperCamelCase : Dict ='CompVis/stable-diffusion-v1-4'
__UpperCamelCase : Optional[int] =DDIMScheduler.from_pretrained(lowerCamelCase__ , subfolder='scheduler' )
__UpperCamelCase : List[str] =CycleDiffusionPipeline.from_pretrained(
lowerCamelCase__ , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , torch_dtype=torch.floataa , revision='fp16' )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
__UpperCamelCase : str ='A black colored car'
__UpperCamelCase : Optional[int] ='A blue colored car'
__UpperCamelCase : Optional[int] =torch.manual_seed(0 )
__UpperCamelCase : Dict =pipe(
prompt=lowerCamelCase__ , source_prompt=lowerCamelCase__ , image=lowerCamelCase__ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCamelCase__ , output_type='np' , )
__UpperCamelCase : str =output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
__UpperCamelCase : int =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
__UpperCamelCase : Optional[int] =init_image.resize((512, 512) )
__UpperCamelCase : Optional[int] ='CompVis/stable-diffusion-v1-4'
__UpperCamelCase : Any =DDIMScheduler.from_pretrained(lowerCamelCase__ , subfolder='scheduler' )
__UpperCamelCase : Optional[Any] =CycleDiffusionPipeline.from_pretrained(lowerCamelCase__ , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
__UpperCamelCase : Union[str, Any] ='A black colored car'
__UpperCamelCase : Optional[Any] ='A blue colored car'
__UpperCamelCase : str =torch.manual_seed(0 )
__UpperCamelCase : List[Any] =pipe(
prompt=lowerCamelCase__ , source_prompt=lowerCamelCase__ , image=lowerCamelCase__ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCamelCase__ , output_type='np' , )
__UpperCamelCase : Any =output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 71 |
from __future__ import annotations
import os
from collections.abc import Mapping
a_ = tuple[int, int]
class lowercase__ :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase )-> None:
'''simple docstring'''
lowerCAmelCase__ = vertices
lowerCAmelCase__ = {
(min(__UpperCAmelCase ), max(__UpperCAmelCase )): weight for edge, weight in edges.items()
}
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> None:
'''simple docstring'''
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
lowerCAmelCase__ = weight
def UpperCAmelCase ( self )-> Graph:
'''simple docstring'''
lowerCAmelCase__ = Graph({min(self.vertices )} , {} )
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
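        # Greedy Prim step: repeatedly add the lightest edge that has exactly one endpoint inside the growing subgraph.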
while len(subgraph.vertices ) < len(self.vertices ):
lowerCAmelCase__ = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
lowerCAmelCase__ = edge
lowerCAmelCase__ = weight
subgraph.add_edge(__UpperCAmelCase , __UpperCAmelCase )
return subgraph
def _a ( UpperCamelCase_ : str = "p107_network.txt" ) -> int:
"""simple docstring"""
lowerCAmelCase__ = os.path.abspath(os.path.dirname(UpperCamelCase_ ) )
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = {}
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
with open(UpperCamelCase_ ) as f:
lowerCAmelCase__ = f.read().strip().split("\n" )
lowerCAmelCase__ = [line.split("," ) for line in data]
for edgea in range(1 , len(UpperCamelCase_ ) ):
for edgea in range(UpperCamelCase_ ):
            if adjacency_matrix[edgea][edgea] != "-":
                lowerCAmelCase__ = int(adjacency_matrix[edgea][edgea] )
lowerCAmelCase__ = Graph(set(range(len(UpperCamelCase_ ) ) ) , UpperCamelCase_ )
lowerCAmelCase__ = graph.prims_algorithm()
lowerCAmelCase__ = sum(graph.edges.values() )
lowerCAmelCase__ = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
| 340 | 0 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
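# Newton-Raphson iteration: x_{n+1} = x_n - f(x_n) / f'(x_n); sympy's diff supplies the derivative symbolically.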
def snake_case_ ( A_ : str, A_ : float | Decimal, A_ : float = 10**-10 ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = a
while True:
_lowerCamelCase : Union[str, Any] = Decimal(A_ ) - (
Decimal(eval(A_ ) ) / Decimal(eval(str(diff(A_ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(A_ ) ) < precision: # noqa: S307
return float(A_ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
    # Find value of e (the root of log(x) - 1 = 0)
print(F"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
| 72 |
from collections import defaultdict
from math import gcd
def _a ( UpperCamelCase_ : int = 1_500_000 ) -> int:
"""simple docstring"""
lowerCAmelCase__ = defaultdict(UpperCamelCase_ )
lowerCAmelCase__ = 2
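    # Euclid's formula: for coprime m > n of opposite parity, (m^2 - n^2, 2mn, m^2 + n^2) is a primitive
    # Pythagorean triple with perimeter 2m(m + n); all other integer right-triangle triples are multiples of one.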
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , UpperCamelCase_ , 2 ):
if gcd(UpperCamelCase_ , UpperCamelCase_ ) > 1:
continue
lowerCAmelCase__ = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(UpperCamelCase_ , limit + 1 , UpperCamelCase_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"{solution() = }")
| 340 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a =logging.get_logger(__name__)
a ={
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : int = '''swin2sr'''
_UpperCAmelCase : Dict = {
'''hidden_size''': '''embed_dim''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=6_4 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_8_0 ,SCREAMING_SNAKE_CASE__ : Tuple=[6, 6, 6, 6, 6, 6] ,SCREAMING_SNAKE_CASE__ : Tuple=[6, 6, 6, 6, 6, 6] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=8 ,SCREAMING_SNAKE_CASE__ : Any=2.0 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 ,SCREAMING_SNAKE_CASE__ : Tuple=0.1 ,SCREAMING_SNAKE_CASE__ : Any="gelu" ,SCREAMING_SNAKE_CASE__ : Dict=False ,SCREAMING_SNAKE_CASE__ : Dict=0.02 ,SCREAMING_SNAKE_CASE__ : Any=1E-5 ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1.0 ,SCREAMING_SNAKE_CASE__ : str="1conv" ,SCREAMING_SNAKE_CASE__ : List[str]="pixelshuffle" ,**SCREAMING_SNAKE_CASE__ : Any ,):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = image_size
__lowerCamelCase : int = patch_size
__lowerCamelCase : Any = num_channels
__lowerCamelCase : Optional[int] = embed_dim
__lowerCamelCase : int = depths
__lowerCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = num_heads
__lowerCamelCase : Tuple = window_size
__lowerCamelCase : Optional[int] = mlp_ratio
__lowerCamelCase : List[Any] = qkv_bias
__lowerCamelCase : Tuple = hidden_dropout_prob
__lowerCamelCase : str = attention_probs_dropout_prob
__lowerCamelCase : Union[str, Any] = drop_path_rate
__lowerCamelCase : Tuple = hidden_act
__lowerCamelCase : Union[str, Any] = use_absolute_embeddings
__lowerCamelCase : Optional[Any] = layer_norm_eps
__lowerCamelCase : Dict = initializer_range
__lowerCamelCase : Any = upscale
__lowerCamelCase : Any = img_range
__lowerCamelCase : List[str] = resi_connection
__lowerCamelCase : Optional[Any] = upsampler
| 73 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase__ ( _UpperCAmelCase ):
a_ =["""image_processor""", """tokenizer"""]
a_ ="""LayoutLMv2ImageProcessor"""
a_ =("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> Tuple:
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , )-> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
lowerCAmelCase__ = self.image_processor(images=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCAmelCase__ = features["words"]
lowerCAmelCase__ = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
# add pixel values
lowerCAmelCase__ = features.pop("pixel_values" )
if return_overflowing_tokens is True:
lowerCAmelCase__ = self.get_overflowing_images(__UpperCAmelCase , encoded_inputs["overflow_to_sample_mapping"] )
lowerCAmelCase__ = images
return encoded_inputs
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F" {len(__UpperCAmelCase )} and {len(__UpperCAmelCase )}" )
return images_with_overflow
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Dict:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , )
return self.image_processor
| 340 | 0 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
warnings.warn(
'''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
    '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , _lowercase , )
| 74 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase__ = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCAmelCase__ = {"unk_token": "<unk>"}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__UpperCAmelCase ) )
lowerCAmelCase__ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCAmelCase__ = os.path.join(self.tmpdirname , __UpperCAmelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Any:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase__ = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase )
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCAmelCase__ = self.get_image_processor(do_normalize=__UpperCAmelCase )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = image_processor(__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = processor(images=__UpperCAmelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = processor(text=__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = tokenizer(__UpperCAmelCase , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = [["cat", "nasa badge"], ["person"]]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = len(__UpperCAmelCase )
lowerCAmelCase__ = max([len(__UpperCAmelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = inputs["input_ids"]
lowerCAmelCase__ = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(images=__UpperCAmelCase , query_images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ = processor.batch_decode(__UpperCAmelCase )
lowerCAmelCase__ = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
| 340 | 0 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def a_ ( __snake_case : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ =checkpoints.load_tax_checkpoint(__snake_case )
lowerCamelCase_ =flatten_dict(__snake_case )
return flax_params
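# Map T5X/Flax parameter names onto the Hugging Face Pix2Struct naming scheme; weight matrices
# (everything except embeddings) are transposed when converting to PyTorch tensors.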
def a_ ( __snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ ={}
lowerCamelCase_ ={
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
lowerCamelCase_ ={
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
lowerCamelCase_ ='''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
lowerCamelCase_ =new_key.replace(__snake_case , __snake_case )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
lowerCamelCase_ =new_key.replace(__snake_case , __snake_case )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
lowerCamelCase_ =re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , __snake_case )
lowerCamelCase_ =new_key.replace('''encoder''' , '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
lowerCamelCase_ =re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , __snake_case )
lowerCamelCase_ =flax_dict[key]
lowerCamelCase_ ={}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
lowerCamelCase_ =torch.from_numpy(converted_dict[key].T )
else:
lowerCamelCase_ =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Any=False , __snake_case : Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =get_flax_param(__snake_case )
if not use_large:
lowerCamelCase_ =PixaStructVisionConfig()
lowerCamelCase_ =PixaStructTextConfig()
else:
lowerCamelCase_ =PixaStructVisionConfig(
hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
lowerCamelCase_ =PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
lowerCamelCase_ =PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__snake_case )
lowerCamelCase_ =PixaStructForConditionalGeneration(__snake_case )
lowerCamelCase_ =rename_and_convert_flax_params(__snake_case )
model.load_state_dict(__snake_case )
lowerCamelCase_ =AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
lowerCamelCase_ =PixaStructImageProcessor()
lowerCamelCase_ =PixaStructProcessor(image_processor=__snake_case , tokenizer=__snake_case )
if use_large:
lowerCamelCase_ =4096
lowerCamelCase_ =True
# mkdir if needed
os.makedirs(__snake_case , exist_ok=__snake_case )
model.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
print('''Model saved in {}'''.format(__snake_case ) )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
a_ : Tuple = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 75 |
from __future__ import annotations
from cmath import sqrt
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> tuple[complex, complex]:
"""simple docstring"""
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
lowerCAmelCase__ = b * b - 4 * a * c
lowerCAmelCase__ = (-b + sqrt(UpperCamelCase_ )) / (2 * a)
lowerCAmelCase__ = (-b - sqrt(UpperCamelCase_ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def _a ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = quadratic_roots(a=5 , b=6 , c=1 )
print(F"The solutions are: {solutiona} and {solutiona}" )
if __name__ == "__main__":
main()
| 340 | 0 |
import json
import sys
def lowerCamelCase__ ( _a , _a):
with open(_a , encoding="utf-8") as f:
SCREAMING_SNAKE_CASE : Any = json.load(_a)
SCREAMING_SNAKE_CASE : Any = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
for benchmark_name in sorted(_a):
SCREAMING_SNAKE_CASE : str = results[benchmark_name]
SCREAMING_SNAKE_CASE : Optional[int] = benchmark_name.split("/")[-1]
output_md.append(f"### Benchmark: {benchmark_file_name}")
SCREAMING_SNAKE_CASE : str = "| metric |"
SCREAMING_SNAKE_CASE : str = "|--------|"
SCREAMING_SNAKE_CASE : List[Any] = "| new / old (diff) |"
for metric_name in sorted(_a):
SCREAMING_SNAKE_CASE : Optional[int] = benchmark_res[metric_name]
SCREAMING_SNAKE_CASE : Any = metric_vals["new"]
SCREAMING_SNAKE_CASE : Optional[Any] = metric_vals.get("old" , _a)
SCREAMING_SNAKE_CASE : Optional[Any] = metric_vals.get("diff" , _a)
SCREAMING_SNAKE_CASE : int = f" {new_val:f}" if isinstance(_a , (int, float)) else "None"
if old_val is not None:
val_str += f" / {old_val:f}" if isinstance(_a , (int, float)) else "None"
if dif_val is not None:
val_str += f" ({dif_val:f})" if isinstance(_a , (int, float)) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("</details>")
with open(_a , "w" , encoding="utf-8") as f:
f.writelines("\n".join(_a))
if __name__ == "__main__":
a_ = sys.argv[1]
a_ = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
| 76 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def _a ( UpperCamelCase_ : int = 3 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
    if not isinstance(UpperCamelCase_ , int ):
        raise TypeError("number of qubits must be an integer." )
if number_of_qubits <= 0:
raise ValueError("number of qubits must be > 0." )
    if math.floor(UpperCamelCase_ ) != number_of_qubits:
        raise ValueError("number of qubits must be an exact integer." )
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10)." )
lowerCAmelCase__ = QuantumRegister(UpperCamelCase_ , "qr" )
lowerCAmelCase__ = ClassicalRegister(UpperCamelCase_ , "cr" )
lowerCAmelCase__ = QuantumCircuit(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = number_of_qubits
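    # Standard QFT pattern: a Hadamard on each qubit followed by controlled-phase rotations of angle pi/2^k
    # contributed by every less-significant qubit.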
for i in range(UpperCamelCase_ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(UpperCamelCase_ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , UpperCamelCase_ , UpperCamelCase_ )
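    # The QFT emits qubits in reversed bit order, so swap the register end-to-end before measuring.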
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(UpperCamelCase_ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(UpperCamelCase_ , UpperCamelCase_ )
# simulate with 10000 shots
lowerCAmelCase__ = Aer.get_backend("qasm_simulator" )
lowerCAmelCase__ = execute(UpperCamelCase_ , UpperCamelCase_ , shots=10_000 )
return job.result().get_counts(UpperCamelCase_ )
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
| 340 | 0 |
"""simple docstring"""
from math import loga
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
    if a < 0:
        raise ValueError('Input value must be a non-negative integer' )
    elif not isinstance(_lowerCAmelCase , int ):
        raise TypeError('Input value must be an \'int\' type' )
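    # a & -a isolates the lowest set bit; log2 of that power of two is its index, i.e. the number of trailing zeros.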
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowercase__ ( _UpperCAmelCase ):
a_ ="""char"""
a_ ="""bpe"""
a_ ="""wp"""
a_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowercase__ ( _UpperCAmelCase ):
a_ =["""image_processor""", """char_tokenizer"""]
a_ ="""ViTImageProcessor"""
a_ ="""MgpstrTokenizer"""
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
lowerCAmelCase__ = tokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained("gpt2" )
lowerCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> List[Any]:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
lowerCAmelCase__ = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None:
lowerCAmelCase__ = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase__ = encodings["input_ids"]
return inputs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = sequences
lowerCAmelCase__ = char_preds.size(0 )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "char" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "bpe" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "wp" )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
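        # For each sample, keep whichever decoding head (character / BPE / wordpiece) produced the highest confidence.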
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCAmelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCAmelCase__ = scores.index(max(__UpperCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCAmelCase__ = {}
lowerCAmelCase__ = final_strs
lowerCAmelCase__ = final_scores
lowerCAmelCase__ = char_strs
lowerCAmelCase__ = bpe_strs
lowerCAmelCase__ = wp_strs
return out
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowerCAmelCase__ = self.char_decode
lowerCAmelCase__ = 1
lowerCAmelCase__ = "[s]"
elif format == DecodeType.BPE:
lowerCAmelCase__ = self.bpe_decode
lowerCAmelCase__ = 2
lowerCAmelCase__ = "#"
elif format == DecodeType.WORDPIECE:
lowerCAmelCase__ = self.wp_decode
lowerCAmelCase__ = 102
lowerCAmelCase__ = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
lowerCAmelCase__ , lowerCAmelCase__ = [], []
lowerCAmelCase__ = pred_logits.size(0 )
lowerCAmelCase__ = pred_logits.size(1 )
lowerCAmelCase__ , lowerCAmelCase__ = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase )
lowerCAmelCase__ = preds_index.view(-1 , __UpperCAmelCase )[:, 1:]
lowerCAmelCase__ = decoder(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ = torch.nn.functional.softmax(__UpperCAmelCase , dim=2 ).max(dim=2 )
lowerCAmelCase__ = preds_max_prob[:, 1:]
for index in range(__UpperCAmelCase ):
lowerCAmelCase__ = preds_str[index].find(__UpperCAmelCase )
lowerCAmelCase__ = preds_str[index][:pred_eos]
lowerCAmelCase__ = preds_index[index].cpu().tolist()
lowerCAmelCase__ = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1
lowerCAmelCase__ = preds_max_prob[index][: pred_eos_index + 1]
lowerCAmelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__UpperCAmelCase )
conf_scores.append(__UpperCAmelCase )
return dec_strs, conf_scores
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
| 340 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
snake_case_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 78 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict=7 , __UpperCAmelCase : Any=3 , __UpperCAmelCase : Tuple=18 , __UpperCAmelCase : Tuple=30 , __UpperCAmelCase : int=400 , __UpperCAmelCase : int=True , __UpperCAmelCase : List[Any]=32 , __UpperCAmelCase : List[Any]=True , ):
'''simple docstring'''
_A = parent
_A = batch_size
_A = num_channels
_A = image_size
_A = min_resolution
_A = max_resolution
_A = do_resize
_A = size_divisor
_A = do_rescale
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class GLPNImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""

    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = GLPNImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size_divisor" ) )
        self.assertTrue(hasattr(image_processing , "resample" ) )
        self.assertTrue(hasattr(image_processing , "do_rescale" ) )
    def test_batch_feature( self ):
'''simple docstring'''
pass
    def test_call_pil( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
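
# To run these tests standalone, something like the following should work; the
# file path is an assumption based on the usual transformers test layout, not
# taken from this snippet:
#   python -m pytest tests/models/glpn/test_image_processing_glpn.py -q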
| 79 |
from collections import defaultdict
def dfs(start: int) -> int:
    """simple docstring"""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
return ret
def even_tree() -> None:
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
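    # For the sample tree above this prints 2: removing edges (1, 3) and (1, 6)
    # leaves three even-sized components ({3, 4}, {6, 8, 9, 10}, {1, 2, 5, 7}).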
| 340 | 0 |
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
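
# Note on the parser below: parse_line assumes the pytest warning-summary
# format, where a warning's continuation lines are indented, so a line that
# does not start with a space marks the end of the current warning block.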
def extract_warnings_from_single_artifact(artifact_path, targets):
    '''simple docstring'''
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8" )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" " ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(F''': {x}: ''' in warning for x in targets ):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
    return selected_warnings
def extract_warnings(artifact_dir, targets):
    '''simple docstring'''
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip" ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets) )
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        '''simple docstring'''
        return values.split("," )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
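    # Example invocation (the run id and token are placeholders, not values
    # taken from this snippet):
    #   python extract_warnings.py --workflow_run_id 123456789 \
    #       --output_dir ./ci_warnings --token "$GH_TOKEN"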
| 80 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """simple docstring"""
    url = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text , "html.parser" )
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div" , class_=class_ ).find("span" ).text
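

# Note: the CSS class name above is hard-coded from Yahoo Finance's markup and
# is likely to break whenever Yahoo changes its page layout.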
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 0 |