| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81 – 54k) | int64 (0 – 721) | string (lengths 91 – 41.9k) | int64 (0 – 699) | int64 (0 or 1) |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case_ )
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = field(default='summarization' , metadata={'include_in_asdict_even_if_is_default': True} )
lowerCamelCase__ : ClassVar[Features] = Features({'text': Value('string' )} )
lowerCamelCase__ : ClassVar[Features] = Features({'summary': Value('string' )} )
lowerCamelCase__ : str = "text"
lowerCamelCase__ : str = "summary"
@property
def a__ (self ):
'''simple docstring'''
return {self.text_column: "text", self.summary_column: "summary"}
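# Usage sketch for the template above: it renames arbitrary dataset columns onto
# the canonical "text"/"summary" schema. The `datasets.tasks` import path is an
# assumption based on the library layout this file belongs to.
from datasets.tasks import Summarization

task = Summarization(text_column="article", summary_column="highlights")
print(task.column_mapping)  # {'article': 'text', 'highlights': 'summary'}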
| 696 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ : List[str] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696 | 1 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
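# Quick sanity check of the bit trick above (illustrative): the least-significant
# bit of a binary integer is 1 exactly for odd numbers, so masking with 1 decides parity.
print(is_even(10))  # True  (0b1010 & 1 == 0)
print(is_even(7))   # False (0b0111 & 1 == 1)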
| 696 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCamelCase__ , lowerCamelCase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : Any = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
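# Standalone usage sketch distilled from the tests above (the prompt is
# illustrative; on a single GPU/CPU the same calls work with device_count() == 1).
import jax
import jax.numpy as jnp
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard

from diffusers import FlaxStableDiffusionPipeline

pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
)
num_samples = jax.device_count()
prompt_ids = shard(pipeline.prepare_inputs(num_samples * ["a photo of an astronaut riding a horse"]))
rng = jax.random.split(jax.random.PRNGKey(0), num_samples)
images = pipeline(prompt_ids, replicate(params), rng, jit=True).images
pils = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))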
| 696 | 1 |
"""simple docstring"""
import re
def lowerCamelCase_ ( _lowerCamelCase ):
return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , str_ )]
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = split_input(str_ )
return "".join(
[''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
try:
lowerCamelCase__ : List[Any] = split_input(_lowerCamelCase )
if upper:
lowerCamelCase__ : Any = ''.join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
lowerCamelCase__ : Union[str, Any] = ''.join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def lowerCamelCase_ ( _lowerCamelCase ):
return to_simple_case(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
try:
lowerCamelCase__ : Union[str, Any] = to_simple_case(_lowerCamelCase )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return to_complex_case(_lowerCamelCase , _lowerCamelCase , '_' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return to_complex_case(_lowerCamelCase , _lowerCamelCase , '-' )
if __name__ == "__main__":
__import__("doctest").testmod()
| 696 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
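# Usage sketch for the pipeline above. "harmonai/maestro-150k" is the checkpoint
# the diffusers docs pair with Dance Diffusion; treat it as an assumption here.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
audios = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios
# audios[0] is a (channels, samples) float numpy array clamped to [-1, 1]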
| 696 | 1 |
"""simple docstring"""
import os
def lowerCamelCase_ ( ):
with open(os.path.dirname(_lowerCamelCase ) + '/p022_names.txt' ) as file:
lowerCamelCase__ : Union[str, Any] = str(file.readlines()[0] )
lowerCamelCase__ : int = names.replace('"' , '' ).split(',' )
names.sort()
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : str = 0
for i, name in enumerate(_lowerCamelCase ):
for letter in name:
name_score += ord(_lowerCamelCase ) - 64
total_score += (i + 1) * name_score
lowerCamelCase__ : Dict = 0
return total_score
if __name__ == "__main__":
print(solution())
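# Worked example from the problem statement: COLIN is worth 3 + 15 + 12 + 9 + 14 = 53,
# and at position 938 in the sorted list it contributes 938 * 53 = 49714 to the total.
assert sum(ord(letter) - 64 for letter in "COLIN") == 53
assert 938 * 53 == 49714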
| 696 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
lowerCamelCase__ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_, 'pt', 1_2, lowerCamelCase_ )
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Optional[Any] = self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Any = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Any = self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : str = Path(lowerCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import TFBertModel
lowerCamelCase__ : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'tf' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = FeatureExtractionPipeline(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = infer_shapes(lowerCamelCase_, lowerCamelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3], lowerCamelCase_ )
self.assertSequenceEqual(variable_names[3:], lowerCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
lowerCamelCase__ : Optional[int] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowerCamelCase__ , lowerCamelCase__ : str = ensure_valid_input(FuncContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase_ ), 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase_ ), set(lowerCamelCase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase_, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase__ , lowerCamelCase__ : Any = ensure_valid_input(FuncNonContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowerCamelCase_ ), 1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens['input_ids'] )
self.assertEqual(ordered_input_names[0], 'input_ids' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
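# Minimal sketch of driving the exporter tested above directly; the output path
# and opset value are illustrative.
from pathlib import Path

from transformers.convert_graph_to_onnx import convert

convert(framework="pt", model="bert-base-cased", output=Path("onnx/bert.onnx"), opset=12)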
| 696 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class a_ ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ : Optional[datasets.Features] = None
class a_ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowerCamelCase__ : Tuple = PandasConfig
def a__ (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
lowerCamelCase__ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCamelCase_, (str, list, tuple) ):
lowerCamelCase__ : Optional[int] = data_files
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCamelCase__ : Union[str, Any] = [dl_manager.iter_files(lowerCamelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files} )]
lowerCamelCase__ : Union[str, Any] = []
for split_name, files in data_files.items():
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCamelCase__ : List[Any] = [dl_manager.iter_files(lowerCamelCase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCamelCase_, gen_kwargs={'files': files} ) )
return splits
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCamelCase__ : Optional[int] = table_cast(lowerCamelCase_, self.config.features.arrow_schema )
return pa_table
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(lowerCamelCase_ ) ):
with open(lowerCamelCase_, 'rb' ) as f:
lowerCamelCase__ : Optional[Any] = pa.Table.from_pandas(pd.read_pickle(lowerCamelCase_ ) )
yield i, self._cast_table(lowerCamelCase_ )
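# The builder above backs the "pandas" packaged loader: each data file is a
# pickled DataFrame converted to one Arrow table. The file name is illustrative.
from datasets import load_dataset

ds = load_dataset("pandas", data_files={"train": "my_dataframe.pkl"})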
| 696 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
| 696 | 1 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase = 400_0000 ):
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ , lowerCamelCase__ : int = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ : List[str] = b, a + b
return sum(_lowerCamelCase )
if __name__ == "__main__":
print(f"{solution() = }")
| 696 |
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 696 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
A_ : List[Any] = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase=False ):
lowerCamelCase__ : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase__ : int = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase__ : List[str] = ''
else:
lowerCamelCase__ : Optional[int] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ : Tuple = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase__ : int = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : Any = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase__ : List[str] = in_proj_bias[: config.hidden_size]
lowerCamelCase__ : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase__ : Union[str, Any] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Any = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Tuple = dct.pop(_lowerCamelCase )
lowerCamelCase__ : Tuple = val
def lowerCamelCase_ ( ):
lowerCamelCase__ : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase__ : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ):
lowerCamelCase__ : int = ViTConfig()
# patch_size
if model_name[-1] == "8":
lowerCamelCase__ : int = 8
# set labels if required
if not base_model:
lowerCamelCase__ : Optional[int] = 1000
lowerCamelCase__ : Any = 'huggingface/label-files'
lowerCamelCase__ : Optional[int] = 'imagenet-1k-id2label.json'
lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ : str = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : int = idalabel
lowerCamelCase__ : Dict = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
lowerCamelCase__ : List[Any] = 384
lowerCamelCase__ : Any = 1536
lowerCamelCase__ : str = 12
lowerCamelCase__ : List[str] = 6
# load original model from torch hub
lowerCamelCase__ : Any = torch.hub.load('facebookresearch/dino:main' , _lowerCamelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase__ : List[str] = original_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
lowerCamelCase__ : int = create_rename_keys(_lowerCamelCase , base_model=_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# load HuggingFace model
if base_model:
lowerCamelCase__ : Optional[Any] = ViTModel(_lowerCamelCase , add_pooling_layer=_lowerCamelCase ).eval()
else:
lowerCamelCase__ : str = ViTForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor
lowerCamelCase__ : Any = ViTImageProcessor()
lowerCamelCase__ : List[str] = image_processor(images=prepare_img() , return_tensors='pt' )
lowerCamelCase__ : Dict = encoding['pixel_values']
lowerCamelCase__ : str = model(_lowerCamelCase )
if base_model:
lowerCamelCase__ : List[str] = original_model(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
else:
lowerCamelCase__ : Union[str, Any] = original_model(_lowerCamelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
A_ : Optional[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
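# Example invocation of the converter above; the output directory is illustrative
# and the model name must be one of the facebookresearch/dino hub entry points.
#   python convert_dino_to_pytorch.py --model_name dino_vits16 --pytorch_dump_folder_path ./dino_vits16
# or, from Python:
# convert_vit_checkpoint("dino_vits16", "./dino_vits16", base_model=True)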
| 696 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over one of the RFC 3526 MODP groups."""

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check that the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check that the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
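# Round-trip sketch using the class above: each party feeds the other's public
# key into generate_shared_key and both arrive at the same SHA-256 digest.
alice = DiffieHellman(group=14)
bob = DiffieHellman(group=14)
assert alice.generate_shared_key(bob.generate_public_key()) == bob.generate_shared_key(alice.generate_public_key())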
| 696 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['onnx']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['onnx'] )
@classmethod
def a__ (cls, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(cls, ['onnx'] )
@classmethod
def a__ (cls, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(cls, ['onnx'] )
| 696 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(_lowerCamelCase ) * abs(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
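# Worked examples for the formula above; note that a negative velocity gives the
# same energy because only the speed enters the product.
print(kinetic_energy(10, 10))  # 0.5 * 10 * 10 * 10 = 500.0
print(kinetic_energy(2, -3))   # 0.5 * 2 * 3 * 3 = 9.0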
| 696 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : List[str] = parent
lowerCamelCase__ : Dict = 1_3
lowerCamelCase__ : Dict = 7
lowerCamelCase__ : Dict = True
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Tuple = True
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Optional[int] = 2
lowerCamelCase__ : Tuple = 9_9
lowerCamelCase__ : Optional[int] = 0
lowerCamelCase__ : Any = 3_2
lowerCamelCase__ : str = 2
lowerCamelCase__ : str = 4
lowerCamelCase__ : str = 0.1
lowerCamelCase__ : Optional[int] = 0.1
lowerCamelCase__ : List[Any] = 5_1_2
lowerCamelCase__ : Dict = 1_6
lowerCamelCase__ : Optional[int] = 2
lowerCamelCase__ : str = 0.02
lowerCamelCase__ : Optional[Any] = 3
lowerCamelCase__ : Tuple = 4
lowerCamelCase__ : Union[str, Any] = 'last'
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : List[Any] = 0
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.floataa )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_lengths:
lowerCamelCase__ : int = (
ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase__ : Union[str, Any] = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.n_langs )
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : Dict = None
if self.use_labels:
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size], 2, dtype=tf.floataa )
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : Optional[int] = FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id, )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = TFFlaubertModel(config=lowerCamelCase_ )
lowerCamelCase__ : Dict = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
lowerCamelCase__ : Tuple = [input_ids, input_mask]
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = TFFlaubertWithLMHeadModel(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
lowerCamelCase__ : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase_ )
lowerCamelCase__ : Dict = {'input_ids': input_ids, 'lengths': input_lengths}
lowerCamelCase__ : str = model(lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Any = TFFlaubertForSequenceClassification(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = {'input_ids': input_ids, 'lengths': input_lengths}
lowerCamelCase__ : str = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_labels
lowerCamelCase__ : List[Any] = TFFlaubertForTokenClassification(config=lowerCamelCase_ )
lowerCamelCase__ : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ : int = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.num_choices
lowerCamelCase__ : List[str] = TFFlaubertForMultipleChoice(config=lowerCamelCase_ )
lowerCamelCase__ : int = tf.tile(tf.expand_dims(lowerCamelCase_, 1 ), (1, self.num_choices, 1) )
lowerCamelCase__ : Tuple = tf.tile(tf.expand_dims(lowerCamelCase_, 1 ), (1, self.num_choices, 1) )
lowerCamelCase__ : Optional[int] = tf.tile(tf.expand_dims(lowerCamelCase_, 1 ), (1, self.num_choices, 1) )
lowerCamelCase__ : Optional[int] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCamelCase__ : Any = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = lowerCamelCase__
        inputs_dict = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : Any = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowerCamelCase__ : Tuple = (
{
'feature-extraction': TFFlaubertModel,
'fill-mask': TFFlaubertWithLMHeadModel,
'question-answering': TFFlaubertForQuestionAnsweringSimple,
'text-classification': TFFlaubertForSequenceClassification,
'token-classification': TFFlaubertForTokenClassification,
'zero-shot': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : List[Any] = False
    def is_pipeline_test_to_skip (self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def a__ (self ):
'''simple docstring'''
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Optional[int] = TFFlaubertModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
        model = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
        input_ids = tf.convert_to_tensor(
            [[0, 1_5_8, 7_3_5, 2_5_9_2, 1_4_2_4, 6_7_2_7, 8_2, 1]], dtype=tf.int32, )  # "J'aime flaubert !"
        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 5_1_2) )
        self.assertEqual(output.shape, expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
            ], dtype=tf.float32, )
self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4 ) )
| 696 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
lowerCamelCase__ : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
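
# A minimal illustration of the `update_from_string` behaviour exercised above:
# it parses a comma-separated "key=value" string and casts each value to the
# type of the existing attribute (int, float, bool, str). Sketch only; the
# values below are arbitrary.
from transformers import GPT2Config

_c = GPT2Config()
_c.update_from_string("n_embd=769,resid_pdrop=0.2,scale_attn_weights=false")
assert _c.n_embd == 769 and _c.resid_pdrop == 0.2 and _c.scale_attn_weights is False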
| 696 | 1 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
lowerCamelCase__ : Tuple = (boundary[1] - boundary[0]) / steps
lowerCamelCase__ : List[str] = boundary[0]
lowerCamelCase__ : Tuple = boundary[1]
lowerCamelCase__ : Union[str, Any] = make_points(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : int = 0.0
y += (h / 2.0) * f(_lowerCamelCase )
for i in x_i:
# print(i)
y += h * f(_lowerCamelCase )
y += (h / 2.0) * f(_lowerCamelCase )
return y
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = a + h
while x < (b - h):
yield x
lowerCamelCase__ : str = x + h
def lowerCamelCase_ ( _lowerCamelCase ): # enter your function here
lowerCamelCase__ : Dict = (x - 0) * (x - 0)
return y
def lowerCamelCase_ ( ):
lowerCamelCase__ : Any = 0.0 # Lower bound of integration
lowerCamelCase__ : str = 1.0 # Upper bound of integration
lowerCamelCase__ : Tuple = 10.0 # define number of steps or resolution
lowerCamelCase__ : str = [a, b] # define boundary of integration
lowerCamelCase__ : str = method_a(_lowerCamelCase , _lowerCamelCase )
print(f'''y = {y}''' )
if __name__ == "__main__":
main()
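
# Sanity check of the rule above, written out by hand: for f(x) = x**2 on
# [0, 1] the exact integral is 1/3, and with h = 0.1 the composite trapezoid
# error is h**2 / 6 ~= 0.00167. Standalone, illustrative check only.
_h = 0.1
_xs = [i * _h for i in range(11)]
_ys = [x * x for x in _xs]
_approx = (_h / 2.0) * (_ys[0] + _ys[-1]) + _h * sum(_ys[1:-1])
assert abs(_approx - 1.0 / 3.0) < 2e-3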
| 696 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case_ ):
'''simple docstring'''
    def __init__(self, value_function, unet, scheduler, env, ):
        '''simple docstring'''
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except: # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except: # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize (self, x_in, key ):
        '''simple docstring'''
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize (self, x_in, key ):
        '''simple docstring'''
        return x_in * self.stds[key] + self.means[key]

    def to_torch (self, x_in ):
        '''simple docstring'''
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in, device=self.unet.device )

    def reset_xa (self, x_in, cond, act_dim ):
        '''simple docstring'''
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion (self, x, conditions, n_guide_steps, scale ):
        '''simple docstring'''
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1 ), timesteps ).sample
                    grad = torch.autograd.grad([y.sum()], [x] )[0]

                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x, conditions, self.action_dim )
            prev_x = self.unet(x.permute(0, 2, 1 ), timesteps ).sample.permute(0, 2, 1 )

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False )['prev_sample']

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_xa(x, conditions, self.action_dim )
            x = self.to_torch(x )
        return x, y
    def __call__(self, obs, batch_size=6_4, planning_horizon=3_2, n_guide_steps=2, scale=0.1 ):
        '''simple docstring'''
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, 'observations' )
        obs = obs[None].repeat(batch_size, axis=0 )

        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        xa = randn_tensor(shape, device=self.unet.device )
        x = self.reset_xa(xa, conditions, self.action_dim )
        x = self.to_torch(x )

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale )

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True ).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key='actions' )

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size )
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
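
# A minimal sketch of the guidance step inside `run_diffusion` above: the
# trajectory is nudged along the gradient of a learned value function, scaled
# by the scheduler's posterior standard deviation. The tensors and the toy
# quadratic "value function" below are stand-ins, not the trained models.
def _toy_guidance_step():
    import torch

    x = torch.randn(2, 8, requires_grad=True)   # fake trajectory batch
    y = -(x ** 2).sum()                         # stand-in value estimate
    grad = torch.autograd.grad([y], [x])[0]
    model_std = 0.1                             # would come from the scheduler variance
    return (x + model_std * grad).detach()      # one guided ascent step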
| 696 | 1 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
A_ : List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
A_ : str = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
A_ : int = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = CamembertTokenizer
lowerCamelCase__ : Optional[Any] = CamembertTokenizerFast
lowerCamelCase__ : Tuple = True
lowerCamelCase__ : Any = True
def a__ (self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = '<pad>'
lowerCamelCase__ : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ), lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ), lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '<s>NOTUSED' )
self.assertEqual(vocab_keys[1], '<pad>' )
self.assertEqual(vocab_keys[-1], '<mask>' )
self.assertEqual(len(lowerCamelCase_ ), 1_0_0_4 )
def a__ (self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_5 )
def a__ (self ):
'''simple docstring'''
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
lowerCamelCase__ : List[Any] = 'I was born in 92000, and this is falsé.'
lowerCamelCase__ : Tuple = tokenizer.encode(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Any = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = rust_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
lowerCamelCase__ : Tuple = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
lowerCamelCase__ : List[str] = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase__ : Dict = self.get_tokenizer()
lowerCamelCase__ : Dict = self.get_rust_tokenizer()
lowerCamelCase__ : str = 'I was born in 92000, and this is falsé.'
lowerCamelCase__ : List[str] = tokenizer.tokenize(lowerCamelCase_ )
lowerCamelCase__ : str = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = self.get_rust_tokenizer()
lowerCamelCase__ : Dict = tokenizer.encode(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = {'input_ids': [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
lowerCamelCase__ : List[str] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_, model_name='camembert-base', revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf', sequences=lowerCamelCase_, )
| 696 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob ( text ):
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(' ' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.

    # print entropy
    print(f'''{round(-1 * my_fir_sum ):.1f}''' )

    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )

    # print second entropy
    print(f'''{round(-1 * my_sec_sum ):.1f}''' )
    # print the difference between them
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )


def analyze_text ( text ):
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main ( ):
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
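
# Tiny worked example for `analyze_text` above: in "abab" every character is
# counted once and every adjacent pair once, plus a synthetic leading " a".
_singles, _pairs = analyze_text('abab')
assert _singles['a'] == 2 and _singles['b'] == 2
assert _pairs['ab'] == 2 and _pairs['ba'] == 1 and _pairs[' a'] == 1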
| 696 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["PerceiverFeatureExtractor"]
A_ : List[str] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
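
# The `_LazyModule` above defers the heavy imports until an attribute is first
# touched. A minimal PEP 562 sketch of the same idea (illustrative only, not
# the transformers implementation):
import importlib

_LAZY_ATTRS = {"PerceiverTokenizer": ".tokenization_perceiver"}


def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")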
| 696 |
"""simple docstring"""
import os
def solution ( ):
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('"' , '' ).split(',' )

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
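
# Worked example from the problem statement: "COLIN" scores
# 3 + 15 + 12 + 9 + 14 = 53 and, as the 938th name in the sorted list,
# contributes 938 * 53 = 49714 to the total.
assert sum(ord(letter) - 64 for letter in "COLIN") == 53
assert 938 * 53 == 49_714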
| 696 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
    def __call__(self, batch_size = 1, num_inference_steps = 1_0_0, generator = None, audio_length_in_s = None, return_dict = True, ):
        '''simple docstring'''
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
                f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )

        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
                f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
                ' process.' )
        sample_size = int(sample_size )

        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype )

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio, t ).sample

            # 2. compute previous image: x_t -> t_t-1
            audio = self.scheduler.step(model_output, t, audio ).prev_sample

        audio = audio.clamp(-1, 1 ).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio )
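
# Hedged usage sketch for the pipeline above; the checkpoint name and the
# 4.5 s clip length are assumptions for illustration, not requirements.
if __name__ == "__main__":
    import torch
    from diffusers import DanceDiffusionPipeline

    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    output = pipe(audio_length_in_s=4.5, num_inference_steps=1_0_0)
    print(output.audios.shape)  # (batch, channels, samples)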
| 696 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = 'Speech2TextFeatureExtractor'
lowerCamelCase__ : Dict = 'Speech2TextTokenizer'
    def __init__(self, feature_extractor, tokenizer ):
        '''simple docstring'''
        super().__init__(feature_extractor, tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs ):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs )

        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
            audio = kwargs.pop('raw_speech' )
        else:
            audio = kwargs.pop('audio', None )
        sampling_rate = kwargs.pop('sampling_rate', None )
        text = kwargs.pop('text', None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs )
        if text is not None:
            encodings = self.tokenizer(text, **kwargs )

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode (self, *args, **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs )

    def decode (self, *args, **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs )

    @contextmanager
    def as_target_processor (self ):
        '''simple docstring'''
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call).' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
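
# Hedged usage sketch for the processor above: audio goes through the feature
# extractor, text through the tokenizer, and the tokenized text rides along as
# "labels". The checkpoint and the silent dummy waveform are assumptions for
# illustration.
if __name__ == "__main__":
    import numpy as np
    from transformers import Speech2TextProcessor

    processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    batch = processor(audio=np.zeros(16_000), sampling_rate=16_000, text="hello world")
    print(sorted(batch.keys()))  # expect "input_features", ... and "labels"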
| 696 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = ["OwlViTFeatureExtractor"]
A_ : List[str] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 696 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[Any] = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Optional[int] = embedding_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = lowerCamelCase__
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
    def _prepare_for_class (self, inputs_dict, model_class, return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels )

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device )
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
def a__ (self ):
'''simple docstring'''
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def _long_tensor( tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )


TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
        model = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(torch_device )
        input_ids = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 5_1_2) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
                    [-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
                    [2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
                ]
            ], device=torch_device, )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
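
# A minimal, hypothetical helper sketching the ratio-based comparison used above:
# dividing expected by observed and bounding the quotient near 1 gives a scale-free
# check when values span many orders of magnitude (here roughly 10e0 to 10e8).
def _within_relative_bounds(expected, observed, tolerance=TOLERANCE):
    ratio = expected / observed
    # both bounds must hold element-wise for the tensors to count as close
    return bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance))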
| 696 | 1 |
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian( matrix ):
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient( a , v ):
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
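
# For reference: the Rayleigh quotient of a Hermitian matrix M and a non-zero
# vector v is R(M, v) = (v* M v) / (v* v), and it is always bounded by the
# extreme eigenvalues of M, lambda_min <= R(M, v) <= lambda_max; tests() below
# evaluates it on concrete Hermitian matrices.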
def tests():
    a = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f'''{a} is not hermitian.'''
    print(rayleigh_quotient(a , v ) )

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f'''{a} is not hermitian.'''
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 696 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset( IterableDataset ):
    '''simple docstring'''
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1 ):
        '''simple docstring'''
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self ):
        '''simple docstring'''
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generates commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
        outputs = self.tokenizer(prompts, padding=True, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria( StoppingCriteria ):
    '''simple docstring'''
    def __init__(self, start_length, eof_strings, tokenizer ):
        '''simple docstring'''
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__(self, input_ids, scores, **kwargs ):
        '''simple docstring'''
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block( string ):
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs['stopping_criteria'][0].start_length = batch['ids'].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch['task_id'].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens , generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ['HF_ALLOW_CODE_EVAL'] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed , device_specific=True )
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    gen_kwargs = {
        'do_sample': args.do_sample,
        'temperature': args.temperature,
        'max_new_tokens': args.max_new_tokens,
        'top_p': args.top_p,
        'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset('openai_humaneval' )
    code_eval_metric = load_metric('code_eval' )
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer , human_eval['test'] , n_copies=n_copies , n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized , batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''] , predictions=[['']] )
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            ' flag to enable code evaluation.' )
        raise exception
    model , human_eval_loader = accelerator.prepare(model , human_eval_loader )
    code_gens = complete_code(
        accelerator , model , tokenizer , human_eval_loader , n_tasks=n_tasks , batch_size=args.batch_size , **gen_kwargs , )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval['test'][task]['test']
            entry_point = f'''check({human_eval['test'][task]['entry_point']})'''
            references.append('\n' + test_func + '\n' + entry_point )
        # Evaluate completions with "code_eval" metric
        pass_at_k , _ = code_eval_metric.compute(
            references=references , predictions=code_gens , num_workers=args.num_workers )
        print(f'''Results: {pass_at_k}''' )
        # Save results to json file
        with open(args.output_file , 'w' ) as fp:
            json.dump(pass_at_k , fp )
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
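
# Example invocation (hypothetical checkpoint and flag values; see HumanEvalArguments
# in arguments.py for the full set of options):
#   accelerate launch human_eval.py --model_ckpt codeparrot/codeparrot \
#       --do_sample True --temperature 0.2 --top_p 0.95 --n_samples 200 \
#       --batch_size 10 --HF_ALLOW_CODE_EVAL 1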
| 696 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester( ConfigTester ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase_, 'tf_padding' ) )
self.parent.assertTrue(hasattr(lowerCamelCase_, 'depth_multiplier' ) )
class MobileNetVaModelTester:
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=3, lowerCamelCase_=3_2, lowerCamelCase_=0.25, lowerCamelCase_=8, lowerCamelCase_=True, lowerCamelCase_=1_0_2_4, lowerCamelCase_=3_2, lowerCamelCase_="relu6", lowerCamelCase_=0.1, lowerCamelCase_=0.02, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=1_0, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : int = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : Any = num_channels
lowerCamelCase__ : Union[str, Any] = image_size
lowerCamelCase__ : List[Any] = depth_multiplier
lowerCamelCase__ : Optional[int] = min_depth
lowerCamelCase__ : Union[str, Any] = tf_padding
lowerCamelCase__ : Tuple = int(last_hidden_size * depth_multiplier )
lowerCamelCase__ : Tuple = output_stride
lowerCamelCase__ : List[Any] = hidden_act
lowerCamelCase__ : str = classifier_dropout_prob
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : List[str] = initializer_range
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Optional[Any] = None
if self.use_labels:
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
lowerCamelCase__ : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def a__ (self ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = MobileNetVaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Any = MobileNetVaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = config_and_inputs
lowerCamelCase__ : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : int = False
lowerCamelCase__ : int = False
lowerCamelCase__ : int = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileNetVaModelTester(self )
lowerCamelCase__ : List[Any] = MobileNetVaConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV1 does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV1 does not support input and output embeddings' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV1 does not output attentions' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : List[Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : int = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = outputs.hidden_states
lowerCamelCase__ : Any = 2_6
self.assertEqual(len(lowerCamelCase_ ), lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[Any] = True
check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Tuple = True
check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224' ) if is_vision_available() else None
)
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = self.default_image_processor
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : Optional[int] = image_processor(images=lowerCamelCase_, return_tensors='pt' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCamelCase__ : List[str] = model(**lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : Any = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : int = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
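
# Shape arithmetic behind the tester defaults above, as a quick sanity check:
# with image_size=32, output_stride=8 and depth_multiplier=0.25 (1024 * 0.25 = 256
# channels), the model's last hidden state is (batch, 256, 32 // 8, 32 // 8),
# i.e. (batch, 256, 4, 4).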
| 696 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
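
# Pattern note: these placeholders make `from transformers import X` succeed even
# without the optional dependency; instantiating one calls requires_backends, which
# raises an ImportError pointing the user at the missing "speech" extra
# (exact wording comes from the requires_backends implementation).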
| 696 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
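
# Lazy-import note: _LazyModule defers the torch/TF-specific imports declared above
# until an attribute is first accessed, so e.g.
# `from transformers.models.tapas import TapasModel` (hypothetical usage) only pays
# the torch import cost at that point.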
| 696 |
"""simple docstring"""
def factorial( num ):
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact
def split_and_add( number ):
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
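
# Worked example: factorial(10) == 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27,
# so split_and_add(factorial(10)) returns 27.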
def solution( num = 100 ):
    nums = factorial(num )
    result = split_and_add(nums )
    return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 696 | 1 |
"""simple docstring"""
def partition( m ):
    memo: list[list[int]] = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1

    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
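
# Worked example: the partitions of 3 are {3}, {2, 1} and {1, 1, 1}, so
# partition(3) == 3; likewise partition(4) == 5
# ({4}, {3, 1}, {2, 2}, {2, 1, 1}, {1, 1, 1, 1}).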
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
A_ : Any = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
A_ : str = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 696 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCamelCase_ )
self.assertIn('attention_mask', lowerCamelCase_ )
self.assertNotIn('decoder_input_ids', lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : int = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : str = tokenizer(
text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : List[str] = json.load(lowerCamelCase_ )
lowerCamelCase__ : Any = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
lowerCamelCase_, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCamelCase_ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
lowerCamelCase_, additional_special_tokens=lowerCamelCase_, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizers(fast=lowerCamelCase_, do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Tuple = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCamelCase__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
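
# Id scheme behind the expected values in the tests above: the Perceiver tokenizer
# is byte-level, so an id is the UTF-8 byte value plus an offset of 6 reserved for
# special tokens ([PAD]=0 ... [CLS]=4, [SEP]=5); e.g. ord("U") == 85 maps to id 91,
# matching the first encoded character of "Unicode €." above.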
| 696 | 1 |
"""simple docstring"""
def lowerCamelCase_ ( a , b ):
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )

    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
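
# Worked example: for a=25 and b=32 the binary strings are "11001" and "100000";
# zero-filled to width 6 the column-wise AND is "000000", so the function above
# returns "0b000000".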
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube( side_length ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def surface_area_cuboid( length , breadth , height ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere( radius ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def surface_area_hemisphere( radius ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def surface_area_cone( radius , height ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum( radius_1 , radius_2 , height ):
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder( radius , height ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def surface_area_torus( torus_radius , tube_radius ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def area_rectangle( length , width ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def area_square( side_length ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def area_triangle( base , height ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def area_triangle_three_sides( side_1 , side_2 , side_3 ):
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('Given three sides do not form a triangle' )
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
return area
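
# area_triangle_three_sides implements Heron's formula: with s = (a + b + c) / 2
# the area is sqrt(s * (s - a) * (s - b) * (s - c)); e.g. sides 3, 4, 5 give
# s = 6 and area 6.0, and the demo below evaluates the 5, 12, 13 case
# (which comes out to 30.0).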
def area_parallelogram( base , height ):
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def area_trapezium( base_1 , base_2 , height ):
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values' )
    return 1 / 2 * (base_1 + base_2) * height
def area_circle( radius ):
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def area_ellipse( radius_x , radius_y ):
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def area_rhombus( diagonal_1 , diagonal_2 ):
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values' )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon( sides , length ):
    if not isinstance(sides , int ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 696 | 1 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
A_ : Optional[int] = logging.get_logger(__name__)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = question_encoder
lowerCamelCase__ : Optional[int] = generator
lowerCamelCase__ : Optional[Any] = self.question_encoder
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if os.path.isfile(lowerCamelCase_ ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(lowerCamelCase_, exist_ok=lowerCamelCase_ )
lowerCamelCase__ : str = os.path.join(lowerCamelCase_, 'question_encoder_tokenizer' )
lowerCamelCase__ : str = os.path.join(lowerCamelCase_, 'generator_tokenizer' )
self.question_encoder.save_pretrained(lowerCamelCase_ )
self.generator.save_pretrained(lowerCamelCase_ )
@classmethod
def a__ (cls, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
lowerCamelCase__ : List[Any] = kwargs.pop('config', lowerCamelCase_ )
if config is None:
lowerCamelCase__ : int = RagConfig.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(
lowerCamelCase_, config=config.question_encoder, subfolder='question_encoder_tokenizer' )
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(
lowerCamelCase_, config=config.generator, subfolder='generator_tokenizer' )
return cls(question_encoder=lowerCamelCase_, generator=lowerCamelCase_ )
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.current_tokenizer(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.generator.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.generator.decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.question_encoder
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.generator
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = "longest", lowerCamelCase_ = None, lowerCamelCase_ = True, **lowerCamelCase_, ):
'''simple docstring'''
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details', lowerCamelCase_, )
if max_length is None:
lowerCamelCase__ : Dict = self.current_tokenizer.model_max_length
lowerCamelCase__ : Union[str, Any] = self(
lowerCamelCase_, add_special_tokens=lowerCamelCase_, return_tensors=lowerCamelCase_, max_length=lowerCamelCase_, padding=lowerCamelCase_, truncation=lowerCamelCase_, **lowerCamelCase_, )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
lowerCamelCase__ : str = self.current_tokenizer.model_max_length
lowerCamelCase__ : Union[str, Any] = self(
text_target=lowerCamelCase_, add_special_tokens=lowerCamelCase_, return_tensors=lowerCamelCase_, padding=lowerCamelCase_, max_length=lowerCamelCase_, truncation=lowerCamelCase_, **lowerCamelCase_, )
lowerCamelCase__ : Tuple = labels['input_ids']
return model_inputs
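
# Usage sketch (hypothetical checkpoint name; this class mirrors transformers'
# RagTokenizer, so the classmethod above plays the role of from_pretrained):
#     tok = a_.a__("facebook/rag-token-base")
#     batch = tok(["who wrote hamlet?"], return_tensors="pt")  # question-encoder side
# Loading reads the two sub-tokenizers from the "question_encoder_tokenizer" and
# "generator_tokenizer" subfolders written by the save method above.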
| 696 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
    @parameterized.expand([('linear',), ('dynamic',)] )
    def a__ (self, scaling_type ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 1_0], config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = GPTNeoXModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device )
            inputs = tokenizer('My favorite food is', return_tensors='pt' ).to(torch_device )
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=2_0 )
            output_str = tokenizer.batch_decode(output_ids )[0]
            self.assertEqual(output_str, expected_output )
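# Illustrative sketch (not part of the original test file; helper name and values assumed):
# linear RoPE scaling divides the position indices by the factor before the rotary angles
# are computed, so even short inputs change, while dynamic NTK scaling only engages past
# the original maximum sequence length.
def _linear_scaled_rope_angles(seq_len, dim, base=10_000.0, factor=10.0 ):
    import torch
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2 ).float() / dim))
    positions = torch.arange(seq_len ).float() / factor  # the linear-scaling step
    return torch.outer(positions, inv_freq )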
| 696 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : str = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__(self, vocab_size=5_0_2_5_7, max_position_embeddings=2_0_4_8, hidden_size=2_0_4_8, num_layers=2_4, attention_types=[[["global", "local"], 1_2]], num_heads=1_6, intermediate_size=None, window_size=2_5_6, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=5_0_2_5_6, eos_token_id=5_0_2_5_6, **kwargs ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                f'''`config.num_layers = {self.num_layers}`. '''
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.' )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
    @staticmethod
    def expand_attention_types_params(attention_types ):
        '''simple docstring'''
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def custom_unfold(input , dimension , size , step ):
    # Custom `torch.Tensor.unfold` implementation to enable the export to ONNX.
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode='floor' ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
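# Sanity-check sketch (example values assumed): for a contiguous tensor the helper above
# should agree with `torch.Tensor.unfold`.
def _unfold_sanity_check():
    import torch
    x = torch.arange(1_2 ).reshape(3, 4 )
    assert torch.equal(custom_unfold(x, 1, 2, 2 ), x.unfold(1, 2, 2 ) )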
def custom_get_block_length_and_num_blocks(seq_length , window_size ):
    # Returns the largest divisor of `seq_length` below `window_size` and the block count.
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode='floor' )
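# Worked example (values assumed): with seq_length=512 and window_size=256 the candidates
# are 1..255, the largest divisor of 512 among them is 128, so the helper returns
# (128, 4), i.e. four local-attention blocks of length 128.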
class GPTNeoOnnxConfig(OnnxConfigWithPast ):
'''simple docstring'''
    @property
    def inputs(self ):
        '''simple docstring'''
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs' )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_attention_heads(self ):
        '''simple docstring'''
        return self._config.num_heads
    def generate_dummy_inputs(self, tokenizer, batch_size = -1, seq_length = -1, is_pair = False, framework = None ):
        '''simple docstring'''
        common_inputs = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
                batch , seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype )], dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset(self ):
        '''simple docstring'''
        return 1_3
| 696 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
A_ : List[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
A_ : Union[str, Any] = spec.loader.load_module()
A_ : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A_ : Optional[int] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
A_ : str = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(config_source )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name , ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint ) )
        raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
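# What the checkpoint regex extracts (doctest-style sketch; the docstring line is made up):
# >>> _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]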
| 696 | 1 |
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
A_ : List[str] = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
if not is_sharded:
lowerCamelCase__ : int = os.path.abspath(_lowerCamelCase )
logger.info(f'''Loading PyTorch weights from {pt_path}''' )
lowerCamelCase__ : Union[str, Any] = torch.load(_lowerCamelCase , map_location='cpu' )
logger.info(f'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
lowerCamelCase__ : Dict = convert_pytorch_state_dict_to_flax(_lowerCamelCase , _lowerCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
lowerCamelCase__ : Union[str, Any] = convert_pytorch_sharded_state_dict_to_flax(_lowerCamelCase , _lowerCamelCase )
return flax_state_dict
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
def is_key_or_prefix_key_in_dict(_lowerCamelCase ) -> bool:
return len(set(_lowerCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
lowerCamelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('scale',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
lowerCamelCase__ : List[Any] = pt_tuple_key[:-1] + ('mean',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
lowerCamelCase__ : int = pt_tuple_key[:-1] + ('var',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
lowerCamelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('embedding',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCamelCase__ : List[Any] = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
lowerCamelCase__ : Tuple = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCamelCase__ : int = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
lowerCamelCase__ : Optional[int] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCamelCase__ : List[str] = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCamelCase__ : int = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
lowerCamelCase__ : Union[str, Any] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
lowerCamelCase__ : Tuple = pt_tuple_key[-2] + '_g'
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
lowerCamelCase__ : Dict = pt_tuple_key[-2] + '_v'
if name is not None:
lowerCamelCase__ : Any = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
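# Layout demo (illustrative shapes, not from the original file): the transposes above exist
# because PyTorch stores conv kernels as (out, in, kH, kW) while Flax expects
# (kH, kW, in, out); linear weights likewise go from (out, in) to (in, out).
def _kernel_layout_demo():
    import numpy as np
    pt_conv = np.zeros((8, 3, 5, 5) )  # PyTorch conv weight: (out, in, kH, kW)
    flax_conv = pt_conv.transpose(2, 3, 1, 0 )  # Flax conv kernel: (kH, kW, in, out)
    assert flax_conv.shape == (5, 5, 3, 8)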
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# convert pytorch tensor to numpy
lowerCamelCase__ : List[str] = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase__ : Tuple = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowerCamelCase__ : int = flax_model.params['params']
else:
lowerCamelCase__ : Union[str, Any] = flax_model.params
lowerCamelCase__ : str = flatten_dict(_lowerCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase__ : Dict = flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = {}
lowerCamelCase__ : Optional[Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase__ : Any = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase__ : str = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
lowerCamelCase__ : Optional[int] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase__ : Any = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase__ , lowerCamelCase__ : List[Any] = rename_key_and_reshape_tensor(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# add model prefix if necessary
lowerCamelCase__ : Dict = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase__ : Optional[int] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowerCamelCase__ : str = jnp.asarray(_lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase__ : Optional[Any] = jnp.asarray(_lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase__ : str = jnp.asarray(_lowerCamelCase )
return unflatten_dict(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
import torch
# Load the index
lowerCamelCase__ : List[str] = {}
for shard_file in shard_filenames:
# load using msgpack utils
lowerCamelCase__ : List[Any] = torch.load(_lowerCamelCase )
lowerCamelCase__ : str = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase__ : Dict = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase__ : Optional[int] = flax_model.params['params']
lowerCamelCase__ : List[Any] = flatten_dict(_lowerCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
lowerCamelCase__ : int = flax_model.params
lowerCamelCase__ : str = flatten_dict(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase__ : Any = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase__ : Any = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
lowerCamelCase__ : Optional[int] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase__ : List[str] = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase__ , lowerCamelCase__ : Dict = rename_key_and_reshape_tensor(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# add model prefix if necessary
lowerCamelCase__ : Any = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase__ : str = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowerCamelCase__ : List[str] = jnp.asarray(_lowerCamelCase )
continue
if "var" in flax_key[-1]:
lowerCamelCase__ : Tuple = jnp.asarray(_lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase__ : Dict = jnp.asarray(_lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase__ : List[Any] = jnp.asarray(_lowerCamelCase )
return unflatten_dict(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = os.path.abspath(_lowerCamelCase )
logger.info(f'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
lowerCamelCase__ : int = getattr(_lowerCamelCase , 'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(_lowerCamelCase , 'rb' ) as state_f:
try:
lowerCamelCase__ : Union[str, Any] = from_bytes(_lowerCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    lowerCamelCase__ : Optional[Any] = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , _lowerCamelCase ) ).values()
if any(_lowerCamelCase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
        lowerCamelCase__ : Union[str, Any] = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , _lowerCamelCase )
lowerCamelCase__ : str = flatten_dict(_lowerCamelCase )
lowerCamelCase__ : List[str] = pt_model.state_dict()
lowerCamelCase__ : Optional[Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
lowerCamelCase__ : str = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Tuple = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCamelCase__ : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix
lowerCamelCase__ : Union[str, Any] = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase__ : str = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase__ : Dict = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_lowerCamelCase ) not in pt_model_dict:
# conv layer
lowerCamelCase__ : List[Any] = flax_key_tuple[:-1] + ('weight',)
lowerCamelCase__ : List[Any] = jnp.transpose(_lowerCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCamelCase ) not in pt_model_dict:
# linear layer
lowerCamelCase__ : List[Any] = flax_key_tuple[:-1] + ('weight',)
lowerCamelCase__ : Union[str, Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCamelCase__ : List[Any] = flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowerCamelCase__ : Union[str, Any] = flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
lowerCamelCase__ : int = flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
lowerCamelCase__ : Optional[Any] = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowerCamelCase__ : Tuple = '.'.join(_lowerCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowerCamelCase__ : List[Any] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowerCamelCase__ : int = key.split('.' )
lowerCamelCase__ : Any = None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowerCamelCase__ : List[str] = key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowerCamelCase__ : List[str] = key_components[-2] + '_v'
if name is not None:
lowerCamelCase__ : str = key_components[:-3] + [name]
lowerCamelCase__ : Tuple = '.'.join(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = key
if flax_key in special_pt_names:
lowerCamelCase__ : Dict = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
lowerCamelCase__ : Union[str, Any] = np.asarray(_lowerCamelCase ) if not isinstance(_lowerCamelCase , np.ndarray ) else flax_tensor
lowerCamelCase__ : List[Any] = torch.from_numpy(_lowerCamelCase )
# remove from missing keys
missing_keys.remove(_lowerCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_lowerCamelCase )
pt_model.load_state_dict(_lowerCamelCase )
# re-transform missing_keys to list
lowerCamelCase__ : List[Any] = list(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(_lowerCamelCase ) > 0:
logger.warning(
f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
' use it for predictions and inference.' )
else:
logger.warning(
f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
'If your task is similar to the task the model of the checkpoint was trained on, '
f'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
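# Typical usage sketch (upstream this helper is exposed as
# `load_flax_checkpoint_in_pytorch_model`; the model name and path below are assumed):
#
#     from transformers import BertModel
#     pt_model = BertModel.from_pretrained("bert-base-uncased")
#     pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "/path/to/flax_model.msgpack")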
| 696 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Tuple = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
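# The `_LazyModule` indirection above keeps `import transformers` cheap: the heavy
# torch/tokenizers/sentencepiece imports only run when an attribute such as
# `LlamaForCausalLM` is first accessed, and the try/except blocks let the package
# degrade gracefully when an optional dependency is missing.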
| 696 | 1 |
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowerCamelCase_ ( _lowerCamelCase ):
if not is_accelerate_available():
return method
lowerCamelCase__ : int = version.parse(accelerate.__version__ ).base_version
if version.parse(_lowerCamelCase ) < version.parse('0.17.0' ):
return method
def wrapper(self , *_lowerCamelCase , **_lowerCamelCase ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *_lowerCamelCase , **_lowerCamelCase )
return wrapper
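# Usage sketch (hypothetical module; upstream this decorator is exposed as
# `apply_forward_hook`). Decorating a non-`forward` entry point makes accelerate's
# offload hook move the weights onto the execution device before the call:
#
#     class Encoder(torch.nn.Module):
#         @apply_forward_hook
#         def encode(self, x):
#             return self.layers(x)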
| 696 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ : List[str] = requests.get(url, headers={"User-Agent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696 | 1 |
"""simple docstring"""
class FlowNetwork :
'''simple docstring'''
    def __init__(self, graph, sources, sinks ):
        '''simple docstring'''
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources, sinks )
        self.verticies_count = len(graph )
        self.maximum_flow_algorithm = None
    def _normalize_graph(self, sources, sinks ):
        '''simple docstring'''
        if isinstance(sources, int ):
            sources = [sources]
        if isinstance(sinks, int ):
            sinks = [sinks]
        if len(sources ) == 0 or len(sinks ) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources ) > 1 or len(sinks ) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )
            size = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0, 0 )
            self.graph.insert(0, [0] * size )
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self ):
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
    def set_maximum_flow_algorithm(self, algorithm ):
        '''simple docstring'''
        self.maximum_flow_algorithm = algorithm(self )
class FlowNetworkAlgorithmExecutor :
'''simple docstring'''
    def __init__(self, flow_network ):
        '''simple docstring'''
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False
    def execute(self ):
        '''simple docstring'''
        if not self.executed:
            self._algorithm()
            self.executed = True
    def _algorithm(self ):
        '''simple docstring'''
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor ):
'''simple docstring'''
    def __init__(self, flow_network ):
        '''simple docstring'''
        super().__init__(flow_network )
        # use this to save your result
        self.maximum_flow = -1
    def getMaximumFlow(self ):
        '''simple docstring'''
        if not self.executed:
            raise Exception('You should execute algorithm before using its result!' )
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor ):
'''simple docstring'''
    def __init__(self, flow_network ):
        '''simple docstring'''
        super().__init__(flow_network )
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count )]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm(self ):
        '''simple docstring'''
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list ):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i ) )
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index] )
    def process_vertex(self, vertex_index ):
        '''simple docstring'''
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index )
            self.relabel(vertex_index )
    def push(self, from_index, to_index ):
        '''simple docstring'''
        preflow_delta = min(
            self.excesses[from_index], self.graph[from_index][to_index] - self.preflow[from_index][to_index], )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def relabel(self, vertex_index ):
        '''simple docstring'''
        min_height = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
A_ : Union[str, Any] = [0]
A_ : Optional[Any] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
A_ : Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
A_ : Dict = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
A_ : Optional[int] = flow_network.find_maximum_flow()
print(f"maximum flow is {maximum_flow}")
| 696 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
    def a__ (self ):
        '''simple docstring'''
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        num_samples = jax.device_count()
        prompts = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0 ), num_samples )
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None, )
        params = replicate(params )
        prompt_ids = pipeline.prepare_inputs(prompts )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids, params, prng_seed, jit=True ).images
        assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        slice = images[2, 0, 2_5_6, 1_0:1_7, 1]
        # With memory efficient attention
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True, )
        params = replicate(params )
        prompt_ids = pipeline.prepare_inputs(prompts )
        prompt_ids = shard(prompt_ids )
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True ).images
        assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        slice_eff = images_eff[2, 0, 2_5_6, 1_0:1_7, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice ).max() < 1e-2
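        # The loose 1e-2 slice tolerance above reflects that the memory-efficient attention
        # path is only approximately equal to the vanilla one; exact equality is not expected.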
| 696 | 1 |
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester :
'''simple docstring'''
    def __init__(self, parent, batch_size=1_3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=9_9, hidden_size=6_4, num_hidden_layers=5, num_attention_heads=4, intermediate_size=6_4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=1_6, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def a__ (self ):
'''simple docstring'''
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = MPNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
    def create_and_check_mpnet_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = MPNetForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def create_and_check_mpnet_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def create_and_check_mpnet_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
    def create_and_check_mpnet_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MPNetModel,
            'fill-mask': MPNetForMaskedLM,
            'question-answering': MPNetForQuestionAnswering,
            'text-classification': MPNetForSequenceClassification,
            'token-classification': MPNetForTokenClassification,
            'zero-shot': MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
def a__ (self ):
'''simple docstring'''
        self.model_tester = MPNetModelTester(self )
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
    def a__ (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs )
    def a__ (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs )
    def a__ (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs )
    def a__ (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs )
    def a__ (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def a__ (self ):
        '''simple docstring'''
        model = MPNetModel.from_pretrained('microsoft/mpnet-base' )
        input_ids = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 1_1, 7_6_8) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4 ) )
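        # This slice-vs-hardcoded-values pattern (atol=1e-4) is the house style for
        # integration tests: it catches weight or kernel regressions without shipping
        # full reference tensors.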
| 696 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline ):
    '''simple docstring'''
    def __init__(self, unet, scheduler ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger than or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> x_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
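# A hedged usage sketch for the audio pipeline above: the positional arguments
# map to (batch_size, num_inference_steps, generator) in the original,
# readable signature, which the obfuscated parameter names above hide. The
# helper assumes a pipeline instance whose UNet/scheduler pair is already
# loaded; all names here are illustrative.
def run_audio_pipeline_demo(pipeline ):
    generator = torch.Generator().manual_seed(0 )
    output = pipeline(1, 1_0_0, generator )
    return output.audios  # numpy array shaped (batch, channels, samples), per the clamp/cpu/numpy step above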
| 696 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Optional[Any] = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class a_ ( snake_case_ , snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'dinat'
lowerCamelCase__ : int = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self, lowerCamelCase_=4, lowerCamelCase_=3, lowerCamelCase_=6_4, lowerCamelCase_=[3, 4, 6, 5], lowerCamelCase_=[2, 4, 8, 1_6], lowerCamelCase_=7, lowerCamelCase_=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], lowerCamelCase_=3.0, lowerCamelCase_=True, lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_=0.1, lowerCamelCase_="gelu", lowerCamelCase_=0.02, lowerCamelCase_=1e-5, lowerCamelCase_=0.0, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : Any = patch_size
lowerCamelCase__ : List[Any] = num_channels
lowerCamelCase__ : Any = embed_dim
lowerCamelCase__ : List[Any] = depths
lowerCamelCase__ : int = len(lowerCamelCase_ )
lowerCamelCase__ : int = num_heads
lowerCamelCase__ : Tuple = kernel_size
lowerCamelCase__ : Tuple = dilations
lowerCamelCase__ : int = mlp_ratio
lowerCamelCase__ : List[str] = qkv_bias
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : int = attention_probs_dropout_prob
lowerCamelCase__ : int = drop_path_rate
lowerCamelCase__ : List[Any] = hidden_act
lowerCamelCase__ : Optional[int] = layer_norm_eps
lowerCamelCase__ : Optional[int] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase__ : List[str] = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
lowerCamelCase__ : Union[str, Any] = layer_scale_init_value
lowerCamelCase__ : Optional[int] = ['stem'] + [f'''stage{idx}''' for idx in range(1, len(lowerCamelCase_ ) + 1 )]
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_, out_indices=lowerCamelCase_, stage_names=self.stage_names )
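# A hedged spot check of the derived field computed in __init__ above: the
# hidden_size exposed for VisionEncoderDecoderModel compatibility is
# embed_dim * 2 ** (num_stages - 1). The helper name is illustrative.
def dinat_final_hidden_size(embed_dim, depths ):
    # mirrors the hidden_size computation in __init__ above
    return int(embed_dim * 2 ** (len(depths ) - 1) )

assert dinat_final_hidden_size(6_4, [3, 4, 6, 5] ) == 5_1_2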
| 696 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
lowerCamelCase__ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_, 'pt', 1_2, lowerCamelCase_ )
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Optional[Any] = self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Any = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Any = self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : str = Path(lowerCamelCase_ ).joinpath('model.onnx' )
# Remove folder if it exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import TFBertModel
lowerCamelCase__ : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'tf' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = FeatureExtractionPipeline(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = infer_shapes(lowerCamelCase_, lowerCamelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3], lowerCamelCase_ )
self.assertSequenceEqual(variable_names[3:], lowerCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
lowerCamelCase__ : Optional[int] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowerCamelCase__ , lowerCamelCase__ : str = ensure_valid_input(FuncContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase_ ), 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase_ ), set(lowerCamelCase_ ) )
# Parameters should be reordered according to their respective places in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase_, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with other args (for instance parameter "past" in GPT2)
lowerCamelCase__ , lowerCamelCase__ : Any = ensure_valid_input(FuncNonContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly one arg (everything before the argument "some_other_args", which is not provided)
self.assertEqual(len(lowerCamelCase_ ), 1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens['input_ids'] )
self.assertEqual(ordered_input_names[0], 'input_ids' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
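# A hedged sketch of driving the same export path outside the unittest
# harness. The positional arguments to `convert` (framework, model, output
# path, opset, tokenizer) are inferred from `_test_export` above; the model
# name is an illustrative assumption.
def export_and_quantize(model_name='bert-base-cased' ):
    with TemporaryDirectory() as tmpdir:
        onnx_path = Path(tmpdir ).joinpath('model.onnx' )
        convert('pt', model_name, onnx_path, 1_2, model_name )
        quantized_path = quantize(onnx_path )
        # return both sizes so callers can repeat the size check from the tests above
        return onnx_path.stat().st_size, quantized_path.stat().st_size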
| 696 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[str] = logging.get_logger(__name__)
A_ : Dict = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Any = 'levit'
def __init__(self, lowerCamelCase_=2_2_4, lowerCamelCase_=3, lowerCamelCase_=3, lowerCamelCase_=2, lowerCamelCase_=1, lowerCamelCase_=1_6, lowerCamelCase_=[1_2_8, 2_5_6, 3_8_4], lowerCamelCase_=[4, 8, 1_2], lowerCamelCase_=[4, 4, 4], lowerCamelCase_=[1_6, 1_6, 1_6], lowerCamelCase_=0, lowerCamelCase_=[2, 2, 2], lowerCamelCase_=[2, 2, 2], lowerCamelCase_=0.02, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : Any = image_size
lowerCamelCase__ : List[Any] = num_channels
lowerCamelCase__ : Union[str, Any] = kernel_size
lowerCamelCase__ : str = stride
lowerCamelCase__ : Union[str, Any] = padding
lowerCamelCase__ : Any = hidden_sizes
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : str = depths
lowerCamelCase__ : Any = key_dim
lowerCamelCase__ : List[str] = drop_path_rate
lowerCamelCase__ : Dict = patch_size
lowerCamelCase__ : Optional[Any] = attention_ratio
lowerCamelCase__ : Any = mlp_ratio
lowerCamelCase__ : int = initializer_range
lowerCamelCase__ : int = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = version.parse('1.11' )
@property
def a__ (self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def a__ (self ):
'''simple docstring'''
return 1e-4
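# A hedged reading of the `down_ops` table built in __init__ above: each entry
# appears to describe a shrink-attention stage as
# [op, key_dim, num_heads, attention_ratio, mlp_ratio, stride]. The helper
# simply reproduces the default computation for spot checking.
def levit_default_down_ops(key_dim, hidden_sizes ):
    return [
        ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
        ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
    ]

assert levit_default_down_ops([1_6, 1_6, 1_6], [1_2_8, 2_5_6, 3_8_4] )[0][2] == 8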
| 696 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
# Out channels is double the in channels because the model predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
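# Distilled from the slow test above into a hedged helper: Kandinsky 2.2 runs
# in two stages, a prior that maps the prompt (and init image) to image
# embeddings, then the controlnet img2img decoder. All argument values mirror
# the test; the function name is illustrative.
def kandinsky_controlnet_imgaimg(pipe_prior, pipeline, prompt, init_image, hint, generator ):
    image_emb, zero_image_emb = pipe_prior(
        prompt, image=init_image, strength=0.85, generator=generator, negative_prompt='' ).to_tuple()
    return pipeline(
        image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint,
        generator=generator, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5,
        output_type='np' ).images[0]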
| 696 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
A_ : Optional[int] = logging.get_logger(__name__)
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
warnings.warn(
'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PerceiverImageProcessor instead.', lowerCamelCase_, )
super().__init__(*lowerCamelCase_, **lowerCamelCase_ )
| 696 |
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
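# A hedged sketch of how a version table like A_ above is typically consumed:
# each entry maps a bare package name to a full pip requirement string, so
# packaging code can look dependencies up by name. The helper is illustrative,
# not the project's actual setup logic.
def deps_list(table, *pkgs ):
    return [table[p] for p in pkgs]

assert deps_list(A_, 'numpy', 'tqdm' ) == ['numpy>=1.17', 'tqdm>=4.27']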
| 696 | 1 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ : List[str] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
A_ : Optional[int] = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
if group not in primes:
raise ValueError('Unsupported Group' )
lowerCamelCase__ : int = primes[group]['prime']
lowerCamelCase__ : Optional[int] = primes[group]['generator']
lowerCamelCase__ : Any = int(hexlify(urandom(3_2 ) ), base=1_6 )
def a__ (self ):
'''simple docstring'''
return hex(self.__private_key )[2:]
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = pow(self.generator, self.__private_key, self.prime )
return hex(lowerCamelCase_ )[2:]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= key <= self.prime - 2
and pow(lowerCamelCase_, (self.prime - 1) // 2, self.prime ) == 1
)
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = int(lowerCamelCase_, base=1_6 )
if not self.is_valid_public_key(lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Tuple = pow(lowerCamelCase_, self.__private_key, self.prime )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= remote_public_key_str <= prime - 2
and pow(lowerCamelCase_, (prime - 1) // 2, lowerCamelCase_ ) == 1
)
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
lowerCamelCase__ : Dict = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[Any] = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[str] = primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(lowerCamelCase_, lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Dict = pow(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
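# A hedged, self-contained round trip of the protocol implemented above,
# written against readable names instead of the obfuscated ones: both parties
# hash the same pow(peer_public, private, prime) value, so the derived secrets
# match. The tiny prime is for illustration only; real exchanges use the MODP
# groups in `primes`.
from hashlib import sha256

def dh_roundtrip(prime, generator, priv_a, priv_b ):
    pub_a = pow(generator, priv_a, prime )
    pub_b = pow(generator, priv_b, prime )
    shared_a = sha256(str(pow(pub_b, priv_a, prime ) ).encode() ).hexdigest()
    shared_b = sha256(str(pow(pub_a, priv_b, prime ) ).encode() ).hexdigest()
    return shared_a == shared_b

assert dh_roundtrip(2_3, 5, 6, 1_5 )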
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 696 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(_lowerCamelCase ) * abs(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 696 | 1 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase = 6008_5147_5143 ):
try:
lowerCamelCase__ : Tuple = int(_lowerCamelCase )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ : str = 2
while i * i <= n:
while n % i == 0:
lowerCamelCase__ : Dict = i
n //= i
i += 1
if n > 1:
lowerCamelCase__ : List[str] = n
return int(_lowerCamelCase )
if __name__ == "__main__":
print(f"{solution() = }")
| 696 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : int = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs`; pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
lowerCamelCase__ : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
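# A hedged standalone check of the `update_from_string` contract exercised
# above: the argument is a comma-separated list of key=value pairs, and each
# value is coerced to the type of the attribute it overrides. The helper name
# is illustrative.
def demo_update_from_string():
    c = GPTaConfig()
    c.update_from_string(f'''n_embd={c.n_embd + 1},scale_attn_weights={not c.scale_attn_weights}''' )
    return c.n_embd, c.scale_attn_weights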
| 696 | 1 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
A_ : int = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def lowerCamelCase_ ( _lowerCamelCase ):
for pegasus_name, hf_name in PATTERNS:
lowerCamelCase__ : Optional[int] = k.replace(_lowerCamelCase , _lowerCamelCase )
return k
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : str = DEFAULTS.copy()
cfg_kwargs.update(_lowerCamelCase )
lowerCamelCase__ : List[str] = PegasusConfig(**_lowerCamelCase )
lowerCamelCase__ : List[str] = PegasusForConditionalGeneration(_lowerCamelCase )
lowerCamelCase__ : List[str] = torch_model.model.state_dict()
lowerCamelCase__ : Tuple = {}
for k, v in tf_weights.items():
lowerCamelCase__ : Optional[Any] = rename_state_dict_key(_lowerCamelCase )
if new_k not in sd:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
lowerCamelCase__ : Optional[Any] = v.T
lowerCamelCase__ : Any = torch.tensor(_lowerCamelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
lowerCamelCase__ : Union[str, Any] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
lowerCamelCase__ : List[str] = mapping['shared.weight']
lowerCamelCase__ : Optional[Any] = mapping['shared.weight']
lowerCamelCase__ : List[str] = {k: torch.zeros_like(_lowerCamelCase ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = torch_model.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
lowerCamelCase__ : str = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def lowerCamelCase_ ( _lowerCamelCase="./ckpt/aeslc/model.ckpt-32000" ):
lowerCamelCase__ : List[str] = tf.train.list_variables(_lowerCamelCase )
lowerCamelCase__ : Any = {}
lowerCamelCase__ : Optional[Any] = ['Adafactor', 'global_step']
for name, shape in tqdm(_lowerCamelCase , desc='converting tf checkpoint to dict' ):
lowerCamelCase__ : Optional[int] = any(pat in name for pat in ignore_name )
if skip_key:
continue
lowerCamelCase__ : int = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Dict = array
return tf_weights
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# save tokenizer first
lowerCamelCase__ : List[str] = Path(_lowerCamelCase ).parent.name
lowerCamelCase__ : Dict = task_specific_params[f'''summarization_{dataset}''']['max_position_embeddings']
lowerCamelCase__ : Optional[Any] = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=_lowerCamelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(_lowerCamelCase )
# convert model
lowerCamelCase__ : Optional[Any] = get_tf_weights_as_numpy(_lowerCamelCase )
lowerCamelCase__ : Tuple = task_specific_params[f'''summarization_{dataset}''']
if dataset == "large":
lowerCamelCase__ : Dict = task_specific_params
lowerCamelCase__ : int = convert_pegasus(_lowerCamelCase , _lowerCamelCase )
torch_model.save_pretrained(_lowerCamelCase )
lowerCamelCase__ : Dict = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(_lowerCamelCase , Path(_lowerCamelCase ) / 'pytorch_model.bin' )
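# A hedged spot check for the rename logic above, assuming the helper is
# available under its readable name `rename_state_dict_key` (as the call in
# the conversion function above assumes): it previews how raw TF keys map
# through the PATTERNS table without loading a checkpoint.
def preview_rename(keys ):
    return {k: rename_state_dict_key(k ) for k in keys}

# e.g. preview_rename(['encoder/attention/output_proj/kernel']) follows the
# PATTERNS replacements defined at the top of this file.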
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
A_ : Optional[Any] = parser.parse_args()
if args.save_dir is None:
A_ : List[str] = Path(args.tf_ckpt_path).parent.name
A_ : Union[str, Any] = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 696 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Dict = value_function
lowerCamelCase__ : int = unet
lowerCamelCase__ : Union[str, Any] = scheduler
lowerCamelCase__ : int = env
lowerCamelCase__ : List[Any] = env.get_dataset()
lowerCamelCase__ : Dict = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[int] = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Tuple = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[Any] = env.observation_space.shape[0]
lowerCamelCase__ : List[str] = env.action_space.shape[0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if type(lowerCamelCase_ ) is dict:
return {k: self.to_torch(lowerCamelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase_, device=self.unet.device )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Optional[Any] = val.clone()
return x_in
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = x.shape[0]
lowerCamelCase__ : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : Dict = torch.full((batch_size,), lowerCamelCase_, device=self.unet.device, dtype=torch.long )
for _ in range(lowerCamelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : str = self.value_function(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample
lowerCamelCase__ : Union[str, Any] = torch.autograd.grad([y.sum()], [x] )[0]
lowerCamelCase__ : Optional[int] = self.scheduler._get_variance(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : Tuple = model_std * grad
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = x.detach()
lowerCamelCase__ : Dict = x + scale * grad
lowerCamelCase__ : Optional[int] = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : Tuple = self.unet(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : Optional[Any] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, predict_epsilon=lowerCamelCase_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Any = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
return x, y
def __call__(self, lowerCamelCase_, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.normalize(lowerCamelCase_, 'observations' )
lowerCamelCase__ : List[str] = obs[None].repeat(lowerCamelCase_, axis=0 )
lowerCamelCase__ : str = {0: self.to_torch(lowerCamelCase_ )}
lowerCamelCase__ : Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : List[Any] = randn_tensor(lowerCamelCase_, device=self.unet.device )
lowerCamelCase__ : int = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
# run the diffusion process
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.run_diffusion(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# sort output trajectories by value
lowerCamelCase__ : Union[str, Any] = y.argsort(0, descending=lowerCamelCase_ ).squeeze()
lowerCamelCase__ : List[str] = x[sorted_idx]
lowerCamelCase__ : Optional[Any] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : Union[str, Any] = actions.detach().cpu().numpy()
lowerCamelCase__ : Union[str, Any] = self.de_normalize(lowerCamelCase_, key='actions' )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : Optional[Any] = np.random.randint(0, lowerCamelCase_ )
lowerCamelCase__ : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
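# --- Illustrative sketch (not part of the pipeline above); the toy value model
# and the trajectory shapes below are assumptions for demonstration only ---
# One value-guidance step as in run_diffusion: take the gradient of a learned
# value estimate w.r.t. the noisy trajectory and nudge the trajectory along it,
# scaled by the posterior std of the current timestep.
import torch

def toy_value(x: torch.Tensor) -> torch.Tensor:
    # Hypothetical value model: prefers trajectories close to the origin.
    return -(x**2).sum(dim=(1, 2))

def guidance_step(x: torch.Tensor, posterior_variance: float, scale: float = 0.1) -> torch.Tensor:
    x = x.detach().requires_grad_(True)
    y = toy_value(x)
    (grad,) = torch.autograd.grad([y.sum()], [x])
    model_std = torch.exp(0.5 * torch.tensor(posterior_variance))
    return (x + scale * model_std * grad).detach()

x = torch.randn(4, 32, 14)  # (batch, planning_horizon, state_dim + action_dim)
x = guidance_step(x, posterior_variance=0.5)
assert x.shape == (4, 32, 14)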
| 696 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowerCamelCase__ : Dict = len(_lowerCamelCase )
    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(_lowerCamelCase ):
        # We apply what we learned previously. First we check that the chosen column
        # does not already appear in the current board (possible_board); if it does,
        # the new queen would collide vertically. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # We then verify that the results of these two formulas do not already exist
        # in their respective collision sets (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks matches, there is a collision, so we continue to the
        # next column in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # Otherwise there is no collision, so we call the dfs function again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _lowerCamelCase , _lowerCamelCase , )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : list[list[str]] = []
depth_first_search([] , [] , [] , _lowerCamelCase , _lowerCamelCase )
# Print all the boards
for board in boards:
for column in board:
print(_lowerCamelCase )
print('' )
print(len(_lowerCamelCase ) , 'solutions were found.' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
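# --- Self-contained check of the two diagonal formulas explained above ---
# Queens at (r1, c1) and (r2, c2) attack diagonally iff r1 - c1 == r2 - c2
# ("\" diagonals) or r1 + c1 == r2 + c2 ("/" diagonals).
def same_diagonal(ra: int, ca: int, rb: int, cb: int) -> bool:
    return ra - ca == rb - cb or ra + ca == rb + cb

assert same_diagonal(0, 0, 3, 3)  # main "\" diagonal
assert same_diagonal(0, 3, 3, 0)  # "/" diagonal
assert not same_diagonal(0, 0, 1, 2)  # knight move: no diagonal attack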
| 696 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = analyze_text(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = list(' ' + ascii_lowercase )
    # total character count, used to normalize counts into probabilities.
lowerCamelCase__ : List[Any] = sum(single_char_strings.values() )
    # first-order entropy over single characters
    lowerCamelCase__ : str = 0
    # for each letter, accumulate its entropy contribution if it occurs in the text
for ch in my_alphas:
if ch in single_char_strings:
lowerCamelCase__ : Tuple = single_char_strings[ch]
lowerCamelCase__ : Union[str, Any] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowerCamelCase ) # entropy formula.
    # print first-order entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
lowerCamelCase__ : Dict = sum(two_char_strings.values() )
lowerCamelCase__ : str = 0
    # for each two-character sequence, accumulate its entropy contribution.
    for cha in my_alphas:
        for chb in my_alphas:
            lowerCamelCase__ : int = cha + chb
if sequence in two_char_strings:
lowerCamelCase__ : int = two_char_strings[sequence]
lowerCamelCase__ : Tuple = int(_lowerCamelCase ) / all_sum
my_sec_sum += prob * math.loga(_lowerCamelCase )
    # print second-order entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = Counter() # type: ignore
lowerCamelCase__ : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def lowerCamelCase_ ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
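# --- Self-contained illustration of the first-order entropy formula above ---
# H = -sum(p * log2(p)) over symbol frequencies; "aabb" has two symbols with
# p = 0.5 each, so H = 1 bit per character.
import math
from collections import Counter

def shannon_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

assert abs(shannon_entropy("aabb") - 1.0) < 1e-9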
| 696 | 1 |
"""simple docstring"""
import re
def lowerCamelCase_ ( _lowerCamelCase ):
if len(re.findall('[ATCG]' , _lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
"""simple docstring"""
import os
def lowerCamelCase_ ( ):
with open(os.path.dirname(_lowerCamelCase ) + '/p022_names.txt' ) as file:
lowerCamelCase__ : Union[str, Any] = str(file.readlines()[0] )
lowerCamelCase__ : int = names.replace('"' , '' ).split(',' )
names.sort()
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : str = 0
for i, name in enumerate(_lowerCamelCase ):
for letter in name:
name_score += ord(_lowerCamelCase ) - 64
total_score += (i + 1) * name_score
lowerCamelCase__ : Dict = 0
return total_score
if __name__ == "__main__":
print(solution())
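# --- Worked example of the scoring rule above ---
# ord(letter) - 64 maps 'A'..'Z' to 1..26, so COLIN scores 3+15+12+9+14 = 53;
# at sorted position 938 (1-based) its contribution is 938 * 53 = 49714, the
# example given in Project Euler problem 22.
assert sum(ord(c) - 64 for c in "COLIN") == 53
assert 938 * 53 == 49714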
| 696 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class a_ ( unittest.TestCase ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=3_0, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_0, lowerCamelCase_=0.02, ):
'''simple docstring'''
lowerCamelCase__ : List[str] = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[str] = image_size
lowerCamelCase__ : Optional[Any] = patch_size
lowerCamelCase__ : int = num_channels
lowerCamelCase__ : Any = is_training
lowerCamelCase__ : List[str] = use_labels
lowerCamelCase__ : Any = hidden_size
lowerCamelCase__ : int = num_hidden_layers
lowerCamelCase__ : Optional[int] = num_attention_heads
lowerCamelCase__ : Optional[Any] = intermediate_size
lowerCamelCase__ : Dict = hidden_act
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : List[Any] = attention_probs_dropout_prob
lowerCamelCase__ : Optional[int] = type_sequence_label_size
lowerCamelCase__ : Union[str, Any] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__ : List[Any] = (image_size // patch_size) ** 2
lowerCamelCase__ : List[str] = num_patches + 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Any = ViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
return config, pixel_values
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = FlaxViTModel(config=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__ : Optional[Any] = (self.image_size, self.image_size)
lowerCamelCase__ : List[str] = (self.patch_size, self.patch_size)
lowerCamelCase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.type_sequence_label_size
lowerCamelCase__ : Union[str, Any] = FlaxViTForImageClassification(config=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : Optional[Any] = 1
lowerCamelCase__ : Union[str, Any] = FlaxViTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : List[Any] = model(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ : str = config_and_inputs
lowerCamelCase__ : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = FlaxViTModelTester(self )
lowerCamelCase__ : Any = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[Any] = model_class(lowerCamelCase_ )
lowerCamelCase__ : Tuple = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : str = [*signature.parameters.keys()]
lowerCamelCase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase__ : str = self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[Any] = model_class(lowerCamelCase_ )
@jax.jit
def model_jitted(lowerCamelCase_, **lowerCamelCase_ ):
return model(pixel_values=lowerCamelCase_, **lowerCamelCase_ )
with self.subTest('JIT Enabled' ):
lowerCamelCase__ : Optional[Any] = model_jitted(**lowerCamelCase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCamelCase__ : List[str] = model_jitted(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
for jitted_output, output in zip(lowerCamelCase_, lowerCamelCase_ ):
self.assertEqual(jitted_output.shape, output.shape )
@slow
def a__ (self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__ : int = model_class_name.from_pretrained('google/vit-base-patch16-224' )
lowerCamelCase__ : List[str] = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(lowerCamelCase_ )
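# --- Quick arithmetic check of the sequence-length formula used above ---
# With the tester defaults (image_size=30, patch_size=2):
# (30 // 2) ** 2 = 225 patches, plus one [CLS] token -> sequence length 226.
assert (30 // 2) ** 2 + 1 == 226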
| 696 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = 'Speech2TextFeatureExtractor'
lowerCamelCase__ : Dict = 'Speech2TextTokenizer'
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_, **lowerCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCamelCase__ : Optional[int] = kwargs.pop('raw_speech' )
else:
lowerCamelCase__ : int = kwargs.pop('audio', lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = kwargs.pop('sampling_rate', lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = kwargs.pop('text', lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowerCamelCase__ : List[str] = args[0]
lowerCamelCase__ : Any = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCamelCase__ : Union[str, Any] = self.feature_extractor(lowerCamelCase_, *lowerCamelCase_, sampling_rate=lowerCamelCase_, **lowerCamelCase_ )
if text is not None:
lowerCamelCase__ : List[Any] = self.tokenizer(lowerCamelCase_, **lowerCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ : Tuple = encodings['input_ids']
return inputs
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@contextmanager
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = self.tokenizer
yield
lowerCamelCase__ : Optional[int] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
| 696 | 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
A_ : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
A_ : list[int] = [ord(letter) for letter in string.ascii_lowercase]
A_ : set[int] = {ord(char) for char in VALID_CHARS}
A_ : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : str = ""
lowerCamelCase__ : int
lowerCamelCase__ : int
lowerCamelCase__ : int
for keychar, cipherchar in zip(cycle(_lowerCamelCase ) , _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(_lowerCamelCase )
return decoded
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : list[str] = []
for key in product(_lowerCamelCase , repeat=3 ):
lowerCamelCase__ : List[Any] = try_key(_lowerCamelCase , _lowerCamelCase )
if encoded is not None:
possibles.append(_lowerCamelCase )
return possibles
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return [possible for possible in possibles if common_word in possible.lower()]
def lowerCamelCase_ ( _lowerCamelCase = "p059_cipher.txt" ):
lowerCamelCase__ : list[int]
lowerCamelCase__ : list[str]
lowerCamelCase__ : str
lowerCamelCase__ : str
lowerCamelCase__ : str = Path(_lowerCamelCase ).parent.joinpath(_lowerCamelCase ).read_text(encoding='utf-8' )
lowerCamelCase__ : Dict = [int(_lowerCamelCase ) for number in data.strip().split(',' )]
lowerCamelCase__ : Optional[Any] = filter_valid_chars(_lowerCamelCase )
for common_word in COMMON_WORDS:
lowerCamelCase__ : Optional[int] = filter_common_word(_lowerCamelCase , _lowerCamelCase )
if len(_lowerCamelCase ) == 1:
break
lowerCamelCase__ : Optional[Any] = possibles[0]
return sum(ord(_lowerCamelCase ) for char in decoded_text )
if __name__ == "__main__":
print(f"{solution() = }")
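# --- Self-contained illustration of the XOR property try_key relies on ---
# XOR with the same key byte twice restores the plaintext: if c = p ^ k then
# c ^ k = p, so decryption is just re-encryption with the same key.
key, plain = ord("g"), ord("H")
cipher = plain ^ key
assert cipher ^ key == plain and chr(cipher ^ key) == "H"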
| 696 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[Any] = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Optional[int] = embedding_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = config_and_inputs
lowerCamelCase__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase_ )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileBertModelTester(self )
lowerCamelCase__ : List[str] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
return torch.tensor(
_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase , )
A_ : Tuple = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[int] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
], device=lowerCamelCase_, )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result by the actual result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowerCamelCase__ : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase__ : Any = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
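# --- Numeric sanity check of the relative-tolerance bound used above ---
# For values spanning ~1e0 to 1e8, absolute differences are meaningless, so
# the test checks 1 - TOLERANCE <= expected / actual <= 1 + TOLERANCE instead.
expected, actual = 1.0e8, 1.00005e8
ratio = expected / actual
assert 1 - 1e-3 <= ratio <= 1 + 1e-3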
| 696 | 1 |
"""simple docstring"""
import os
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = len(grid[0] )
lowerCamelCase__ : str = len(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = 0
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Dict = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_lowerCamelCase ):
for j in range(n_rows - 3 ):
lowerCamelCase__ : Any = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
lowerCamelCase__ : Dict = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
lowerCamelCase__ : Optional[Any] = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
            # Right-to-left diagonal (/) product
if i > 2:
lowerCamelCase__ : Dict = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
lowerCamelCase__ : Optional[int] = max(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if max_product > largest:
lowerCamelCase__ : str = max_product
return largest
def lowerCamelCase_ ( ):
lowerCamelCase__ : Tuple = []
with open(os.path.dirname(_lowerCamelCase ) + '/grid.txt' ) as file:
for line in file:
grid.append(line.strip('\n' ).split(' ' ) )
lowerCamelCase__ : int = [[int(_lowerCamelCase ) for i in grid[j]] for j in range(len(_lowerCamelCase ) )]
return largest_product(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
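# --- Self-contained mini-example of the per-cell products searched above ---
# On a 4x4 grid each direction yields products like these; the brute force
# above keeps the largest one it encounters.
mini = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
diag = mini[0][0] * mini[1][1] * mini[2][2] * mini[3][3]  # "\" diagonal
row = mini[3][0] * mini[3][1] * mini[3][2] * mini[3][3]  # bottom row
assert diag == 1056 and row == 43680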
| 696 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A_ : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=1 ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer
lowerCamelCase__ : Optional[Any] = dataset
lowerCamelCase__ : int = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
lowerCamelCase__ : Any = n_copies
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for task in range(self.n_tasks ):
            # without the strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = start_length
lowerCamelCase__ : List[str] = eof_strings
lowerCamelCase__ : List[str] = tokenizer
def __call__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = re.split('(%s)' % '|'.join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ):
lowerCamelCase__ : List[str] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
lowerCamelCase__ : str = batch['ids'].shape[-1]
lowerCamelCase__ : int = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
lowerCamelCase__ : Optional[Any] = batch['task_id'].repeat(_lowerCamelCase )
lowerCamelCase__ : List[Any] = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ : List[Any] = generated_tokens.cpu().numpy()
lowerCamelCase__ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
lowerCamelCase__ : str = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ : Optional[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def lowerCamelCase_ ( ):
# Setup configuration
lowerCamelCase__ : int = HfArgumentParser(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ : Tuple = 'false'
if args.num_workers is None:
lowerCamelCase__ : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ : Optional[int] = tokenizer.eos_token
lowerCamelCase__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ : Any = load_dataset('openai_humaneval' )
lowerCamelCase__ : Optional[int] = load_metric('code_eval' )
lowerCamelCase__ : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ : Optional[int] = args.n_samples // args.batch_size
lowerCamelCase__ : Tuple = TokenizedDataset(_lowerCamelCase , human_eval['test'] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ : Union[str, Any] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ : List[Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ : str = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
lowerCamelCase__ : List[str] = []
for task in tqdm(range(_lowerCamelCase ) ):
lowerCamelCase__ : int = human_eval['test'][task]['test']
lowerCamelCase__ : Union[str, Any] = f'''check({human_eval['test'][task]['entry_point']})'''
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ : Any = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
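# --- Self-contained sketch of the stopping rule in EndOfFunctionCriteria ---
# Generation for a batch stops only once every decoded continuation contains
# at least one of the EOF strings.
_decoded = ["    return x\ndef next_fn():", "    pass\nclass A:"]
_eof_strings = ["\nclass", "\ndef"]
assert all(any(s in d for s in _eof_strings) for d in _decoded)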
| 696 | 1 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ : List[str] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
A_ : List[str] = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
A_ : int = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('string', id='sequence' ),
'references': datasets.Value('string', id='sequence' ),
} ), codebase_urls=['https://github.com/jitsi/jiwer/'], reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
], )
def a__ (self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=False ):
'''simple docstring'''
if concatenate_texts:
return compute_measures(lowerCamelCase_, lowerCamelCase_ )["wer"]
else:
lowerCamelCase__ : Any = 0
lowerCamelCase__ : Dict = 0
for prediction, reference in zip(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Union[str, Any] = compute_measures(lowerCamelCase_, lowerCamelCase_ )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
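# --- Worked example matching the docstring's 0.5 result above ---
# Pair 1 ("... the prediction" vs "... the reference"): 1 substitution, 3 hits.
# Pair 2 ("there is an other sample" vs "there is another one"): 2
# substitutions, 1 insertion, 2 hits.  Pooled over both pairs:
# incorrect = S + D + I = 3 + 0 + 1 = 4 and total = S + D + C = 3 + 0 + 5 = 8.
assert (3 + 0 + 1) / (3 + 0 + 5) == 0.5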
| 696 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
| 696 | 1 |
"""simple docstring"""
from datetime import datetime
import requests
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Any = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
lowerCamelCase__ : Union[str, Any] = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(_lowerCamelCase ).content
if __name__ == "__main__":
A_ : Any = input("Enter Video/IGTV url: ").strip()
A_ : Any = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f"Done. Video saved to disk as {file_name}.")
| 696 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
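# --- Worked mini-example of the two helpers above ---
# 10! = 3628800 and its digit sum is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
import math

assert math.factorial(10) == 3628800
assert sum(int(d) for d in str(math.factorial(10))) == 27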
| 696 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : int = logging.get_logger(__name__)
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = 'timm_backbone'
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=None, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : str = backbone
lowerCamelCase__ : List[Any] = num_channels
lowerCamelCase__ : int = features_only
lowerCamelCase__ : Union[str, Any] = use_pretrained_backbone
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : List[Any] = out_indices if out_indices is not None else (-1,)
| 696 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Union[str, Any] = "tf"
else:
A_ : List[str] = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCamelCase_ )
self.assertIn('attention_mask', lowerCamelCase_ )
self.assertNotIn('decoder_input_ids', lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : int = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : str = tokenizer(
text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : List[str] = json.load(lowerCamelCase_ )
lowerCamelCase__ : Any = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
lowerCamelCase_, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCamelCase_ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
lowerCamelCase_, additional_special_tokens=lowerCamelCase_, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizers(fast=lowerCamelCase_, do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Tuple = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCamelCase__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
| 696 | 1 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
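# Read an OmegaConf YAML config from disk, optionally pretty-printing its contents.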
def load_config(config_path , display=False ):
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
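# Instantiate a VQModel from its YAML config and restore weights from a checkpoint file.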
def load_vqgan(device , conf_path=None , ckpt_path=None ):
    if conf_path is None:
        conf_path = './model_checkpoints/vqgan_only.yaml'
    config = load_config(conf_path )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = './model_checkpoints/vqgan_only.pt'
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd['state_dict']
    model.load_state_dict(sd , strict=False )  # strict=False assumed; the original strictness flag was elided
    model.to(device )
    del sd
    return model
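# Round-trip an image through the VQGAN encoder and decoder to inspect reconstruction quality.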
def reconstruct_with_vqgan(x , model ):
    z , _ , _ = model.encode(x )
    print(f'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
    xrec = model.decode(z )
    return xrec
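# Resolve a dotted import path such as 'package.module.ClassName' to the object it names.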
def get_obj_from_str(string , reload=False ):
    module , cls = string.rsplit('.' , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config(config ):
    if "target" not in config:
        raise KeyError('Expected key `target` to instantiate.' )
    return get_obj_from_str(config['target'] )(**config.get('params' , {} ) )
def load_model_from_config(config , sd , gpu=True , eval_mode=True ):
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config , ckpt , gpu , eval_mode ):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt , map_location='cpu' )
        global_step = pl_sd['global_step']
        print(f'''loaded model from global step {global_step}.''' )
    else:
        pl_sd = {'state_dict': None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd['state_dict'] , gpu=gpu , eval_mode=eval_mode )['model']
    return model, global_step
| 696 |
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube(side_length ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def surface_area_cuboid(length , breadth , height ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def surface_area_hemisphere(radius ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
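    # Curved surface (2 * pi * r^2) plus the flat circular base (pi * r^2).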
return 3 * pi * radius**2
def surface_area_cone(radius , height ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1 , radius_2 , height ):
    if radius_1 < 0 or radius_2 < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
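    # The slant height follows from the Pythagorean theorem on the height and the difference of the radii.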
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius , height ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius , tube_radius ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
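    # Surface area of a ring torus: 4 * pi^2 * torus_radius * tube_radius.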
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def area_rectangle(length , width ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def area_square(side_length ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def area_triangle(base , height ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def area_triangle_three_sides(side_1 , side_2 , side_3 ):
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
raise ValueError('Given three sides do not form a triangle' )
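    # Heron's formula: area = sqrt(s * (s - side_1) * (s - side_2) * (s - side_3)), where s is the semi-perimeter.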
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
return area
def area_parallelogram(base , height ):
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def area_trapezium(base_1 , base_2 , height ):
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values' )
    return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius ):
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def area_ellipse(radius_x , radius_y ):
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def area_rhombus(diagonal_1 , diagonal_2 ):
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values' )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides , length ):
    if not isinstance(sides , int ) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or equal to three as number of sides' )
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 696 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
        prompt = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
        num_samples = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
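        # With jit=True the pipeline runs the sharded inputs in parallel, one shard per device.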
        images = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
            assert np.abs(np.abs(images, dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
        prompt = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
        num_samples = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
        images = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
            assert np.abs((np.abs(images, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
        prompt = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
        num_samples = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
        images = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
            assert np.abs((np.abs(images, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
        prompt = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
        num_samples = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
        images = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
            assert np.abs((np.abs(images, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
        scheduler = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
        scheduler_state = scheduler.create_state()
        params['scheduler'] = scheduler_state
        prompt = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
        num_samples = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
        images = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
            assert np.abs((np.abs(images, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
        prompt = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
        num_samples = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
        images = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
        assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        slice = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
        pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
        images_eff = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
        assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        slice_eff = images_eff[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 696 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
        config = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
        outputs = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
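        # The cached key/value states let the second pass attend to the prefix without re-encoding it.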
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
def a__ (self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask , token_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
        self.model_tester = GPTNeoXModelTester(self )
        self.config_tester = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
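        # This input is 1.5x max_position_embeddings, forcing the scaled RoPE to extrapolate.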
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
| 696 | 1 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
A_ : Optional[int] = open # noqa: we just need to have a builtin inside this module to test it properly
| 696 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
A_ : List[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
A_ : Union[str, Any] = spec.loader.load_module()
A_ : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A_ : Optional[int] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
A_ : str = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
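# Flag every config class whose docstring lacks a link to its canonical checkpoint on the Hub.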
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(config_source )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name , ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 696 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester ( unittest.TestCase ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_0_0, lowerCamelCase_=1_3, lowerCamelCase_=3_0, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_0, lowerCamelCase_=0.02, lowerCamelCase_=3, ):
'''simple docstring'''
lowerCamelCase__ : Tuple = parent
lowerCamelCase__ : int = vocab_size
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : Optional[int] = image_size
lowerCamelCase__ : str = patch_size
lowerCamelCase__ : Optional[Any] = num_channels
lowerCamelCase__ : Optional[int] = is_training
lowerCamelCase__ : Union[str, Any] = use_labels
lowerCamelCase__ : Dict = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Union[str, Any] = num_attention_heads
lowerCamelCase__ : Optional[int] = intermediate_size
lowerCamelCase__ : Optional[int] = hidden_act
lowerCamelCase__ : Optional[Any] = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : str = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__ : Dict = (image_size // patch_size) ** 2
lowerCamelCase__ : str = num_patches + 1
def a__ (self ):
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
return config, pixel_values, labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxBeitModel(config=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = FlaxBeitForMaskedImageModeling(config=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.type_sequence_label_size
lowerCamelCase__ : Dict = FlaxBeitForImageClassification(config=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : Dict = 1
lowerCamelCase__ : int = FlaxBeitForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : str = model(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[str] = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def a__ (self ):
'''simple docstring'''
        self.model_tester = FlaxBeitModelTester(self )
        self.config_tester = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[Any] = model_class(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase__ : Any = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase__ : str = self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model_class(lowerCamelCase_ )
@jax.jit
def model_jitted(lowerCamelCase_, **lowerCamelCase_ ):
return model(pixel_values=lowerCamelCase_, **lowerCamelCase_ )
with self.subTest('JIT Enabled' ):
lowerCamelCase__ : int = model_jitted(**lowerCamelCase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCamelCase__ : List[str] = model_jitted(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
for jitted_output, output in zip(lowerCamelCase_, lowerCamelCase_ ):
self.assertEqual(jitted_output.shape, output.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__ : Dict = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
lowerCamelCase__ : Optional[Any] = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(lowerCamelCase_ )
def prepare_img( ):
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
lowerCamelCase__ : int = self.default_image_processor
lowerCamelCase__ : int = prepare_img()
lowerCamelCase__ : List[str] = image_processor(images=lowerCamelCase_, return_tensors='np' ).pixel_values
# prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 1_9_6), dtype=bool )
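        # An all-ones mask marks every one of the 196 patch positions as masked.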
# forward pass
        outputs = model(pixel_values=lowerCamelCase_, bool_masked_pos=bool_masked_pos )
        logits = outputs.logits
# verify the logits
lowerCamelCase__ : str = (1, 1_9_6, 8_1_9_2)
self.assertEqual(logits.shape, lowerCamelCase_ )
lowerCamelCase__ : List[str] = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], lowerCamelCase_, atol=1e-2 ) )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
lowerCamelCase__ : Any = self.default_image_processor
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : List[str] = image_processor(images=lowerCamelCase_, return_tensors='np' )
# forward pass
        outputs = model(**lowerCamelCase_ )
        logits = outputs.logits
# verify the logits
lowerCamelCase__ : Union[str, Any] = (1, 1_0_0_0)
self.assertEqual(logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
lowerCamelCase__ : Tuple = 2_8_1
self.assertEqual(logits.argmax(-1 ).item(), lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
lowerCamelCase__ : Dict = self.default_image_processor
lowerCamelCase__ : Optional[int] = prepare_img()
lowerCamelCase__ : int = image_processor(images=lowerCamelCase_, return_tensors='np' )
# forward pass
        outputs = model(**lowerCamelCase_ )
        logits = outputs.logits
# verify the logits
lowerCamelCase__ : Dict = (1, 2_1_8_4_1)
self.assertEqual(logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
lowerCamelCase__ : List[str] = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item(), lowerCamelCase_ )
| 696 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
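# The branches below register optional submodules only when their backend (sentencepiece, tokenizers, torch) is installed.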
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 696 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : List[str] = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ : str = False
lowerCamelCase__ : List[Any] = False
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
        inputs_dict = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
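                # Models in the pretraining mapping also expect a next-sentence-prediction label.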
                inputs_dict['next_sentence_label'] = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa )
return inputs_dict
class TFMobileBertModelTester ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=3_2, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : List[str] = parent
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : str = seq_length
lowerCamelCase__ : int = is_training
lowerCamelCase__ : Tuple = use_input_mask
lowerCamelCase__ : List[str] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : List[Any] = vocab_size
lowerCamelCase__ : Optional[int] = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : int = num_attention_heads
lowerCamelCase__ : Optional[int] = intermediate_size
lowerCamelCase__ : Tuple = hidden_act
lowerCamelCase__ : Any = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Optional[Any] = max_position_embeddings
lowerCamelCase__ : int = type_vocab_size
lowerCamelCase__ : int = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Any = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Tuple = embedding_size
def a__ (self ):
'''simple docstring'''
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length] )
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
                choice_labels = ids_tensor([self.batch_size], self.num_choices )
            config = MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TFMobileBertModel(config=lowerCamelCase_ )
lowerCamelCase__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ : List[str] = model(lowerCamelCase_ )
lowerCamelCase__ : Dict = [input_ids, input_mask]
lowerCamelCase__ : int = model(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = TFMobileBertForMaskedLM(config=lowerCamelCase_ )
lowerCamelCase__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = TFMobileBertForNextSentencePrediction(config=lowerCamelCase_ )
lowerCamelCase__ : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = TFMobileBertForPreTraining(config=lowerCamelCase_ )
lowerCamelCase__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(
result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.num_labels
lowerCamelCase__ : Union[str, Any] = TFMobileBertForSequenceClassification(config=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_choices
lowerCamelCase__ : List[Any] = TFMobileBertForMultipleChoice(config=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = tf.tile(tf.expand_dims(lowerCamelCase_, 1 ), (1, self.num_choices, 1) )
lowerCamelCase__ : int = tf.tile(tf.expand_dims(lowerCamelCase_, 1 ), (1, self.num_choices, 1) )
lowerCamelCase__ : Dict = tf.tile(tf.expand_dims(lowerCamelCase_, 1 ), (1, self.num_choices, 1) )
lowerCamelCase__ : Optional[int] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = TFMobileBertForTokenClassification(config=lowerCamelCase_ )
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = TFMobileBertForQuestionAnswering(config=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = config_and_inputs
lowerCamelCase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = TFMobileBertModelTest.TFMobileBertModelTester(self )
lowerCamelCase__ : Optional[Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
for model_name in ["google/mobilebert-uncased"]:
lowerCamelCase__ : str = TFMobileBertModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
lowerCamelCase__ : Optional[int] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase__ : List[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Union[str, Any] = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : List[Any] = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3], lowerCamelCase_, atol=1e-4 )
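# A hedged, self-contained sketch of the integration check above (assumes TensorFlow,
# transformers, and network access to the public "google/mobilebert-uncased" weights):
#
# import tensorflow as tf
# from transformers import TFMobileBertForPreTraining
# model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
# logits = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
# print(logits.shape)  # expected: (1, 6, 30522)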
| 696 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ : List[str] = requests.get(url, headers={"User-Agent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696 | 1 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class a_ ( logging.LoggerAdapter ):
'''simple docstring'''
@staticmethod
def a__ (lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def a__ (self, lowerCamelCase_, lowerCamelCase_, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
lowerCamelCase__ : Tuple = kwargs.pop('main_process_only', lowerCamelCase_ )
lowerCamelCase__ : Dict = kwargs.pop('in_order', lowerCamelCase_ )
if self.isEnabledFor(lowerCamelCase_ ):
if self._should_log(lowerCamelCase_ ):
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.process(lowerCamelCase_, lowerCamelCase_ )
self.logger.log(lowerCamelCase_, lowerCamelCase_, *lowerCamelCase_, **lowerCamelCase_ )
elif in_order:
lowerCamelCase__ : Dict = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.process(lowerCamelCase_, lowerCamelCase_ )
self.logger.log(lowerCamelCase_, lowerCamelCase_, *lowerCamelCase_, **lowerCamelCase_ )
state.wait_for_everyone()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase = None ):
if log_level is None:
lowerCamelCase__ : Optional[int] = os.environ.get('ACCELERATE_LOG_LEVEL' , _lowerCamelCase )
lowerCamelCase__ : int = logging.getLogger(_lowerCamelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_lowerCamelCase , {} )
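# A hedged usage sketch of the adapter above (the public entry point in Accelerate is
# accelerate.logging.get_logger; the factory function here is name-obfuscated):
#
# from accelerate.logging import get_logger
# logger = get_logger(__name__, log_level="INFO")
# logger.info("logged once, on the main process only")
# logger.info("logged on every process", main_process_only=False)
# logger.debug("logged per process, in rank order", main_process_only=False, in_order=True)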
| 696 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCamelCase__ , lowerCamelCase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : Any = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
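# The replicate/shard pattern these tests repeat, as a hedged standalone sketch
# (assumes a multi-device JAX runtime and access to the public CompVis checkpoint;
# positional arguments mirror the calls above: prompt_ids, params, rng, steps):
#
# pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16)
# num_devices = jax.device_count()
# prompt_ids = pipeline.prepare_inputs(["a photo of an astronaut"] * num_devices)
# params = replicate(params)
# rngs = jax.random.split(jax.random.PRNGKey(0), num_devices)
# images = pipeline(shard(prompt_ids), params, rngs, 50, jit=True).images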
| 696 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : str = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 'yolos'
def __init__(self, lowerCamelCase_=7_6_8, lowerCamelCase_=1_2, lowerCamelCase_=1_2, lowerCamelCase_=3_0_7_2, lowerCamelCase_="gelu", lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_=0.02, lowerCamelCase_=1e-12, lowerCamelCase_=[5_1_2, 8_6_4], lowerCamelCase_=1_6, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=1_0_0, lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_=1, lowerCamelCase_=5, lowerCamelCase_=2, lowerCamelCase_=5, lowerCamelCase_=2, lowerCamelCase_=0.1, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : List[Any] = hidden_size
lowerCamelCase__ : Tuple = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : str = initializer_range
lowerCamelCase__ : Union[str, Any] = layer_norm_eps
lowerCamelCase__ : List[str] = image_size
lowerCamelCase__ : Any = patch_size
lowerCamelCase__ : Dict = num_channels
lowerCamelCase__ : List[Any] = qkv_bias
lowerCamelCase__ : List[Any] = num_detection_tokens
lowerCamelCase__ : List[str] = use_mid_position_embeddings
lowerCamelCase__ : int = auxiliary_loss
# Hungarian matcher
lowerCamelCase__ : Tuple = class_cost
lowerCamelCase__ : Union[str, Any] = bbox_cost
lowerCamelCase__ : List[Any] = giou_cost
# Loss coefficients
lowerCamelCase__ : Union[str, Any] = bbox_loss_coefficient
lowerCamelCase__ : Union[str, Any] = giou_loss_coefficient
lowerCamelCase__ : str = eos_coefficient
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = version.parse('1.11' )
@property
def a__ (self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def a__ (self ):
'''simple docstring'''
return 1e-4
@property
def a__ (self ):
'''simple docstring'''
return 1_2
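# A hedged instantiation sketch (the public class in transformers is YolosConfig;
# any field left unspecified falls back to the defaults defined above):
#
# from transformers import YolosConfig
# config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
# print(config.hidden_size, config.num_detection_tokens)  # 768 100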
| 696 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
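# A hedged usage sketch of this unconditional audio pipeline (its public diffusers
# analogue is DanceDiffusionPipeline; the checkpoint name is illustrative):
#
# from diffusers import DanceDiffusionPipeline
# pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
# audio = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios[0]
# # audio: numpy array of shape (channels, samples), clamped to [-1, 1]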
| 696 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class a_ :
'''simple docstring'''
@staticmethod
def a__ (*lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Tuple = MODEL_FOR_OBJECT_DETECTION_MAPPING
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = ObjectDetectionPipeline(model=lowerCamelCase_, image_processor=lowerCamelCase_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png', threshold=0.0 )
self.assertGreater(len(lowerCamelCase_ ), 0 )
for detected_object in outputs:
self.assertEqual(
lowerCamelCase_, {
'score': ANY(lowerCamelCase_ ),
'label': ANY(lowerCamelCase_ ),
'box': {'xmin': ANY(lowerCamelCase_ ), 'ymin': ANY(lowerCamelCase_ ), 'xmax': ANY(lowerCamelCase_ ), 'ymax': ANY(lowerCamelCase_ )},
}, )
import datasets
lowerCamelCase__ : List[Any] = datasets.load_dataset('hf-internal-testing/fixtures_image_utils', 'image', split='test' )
lowerCamelCase__ : Optional[int] = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
lowerCamelCase__ : Union[str, Any] = object_detector(lowerCamelCase_, threshold=0.0 )
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
for outputs in batch_outputs:
self.assertGreater(len(lowerCamelCase_ ), 0 )
for detected_object in outputs:
self.assertEqual(
lowerCamelCase_, {
'score': ANY(lowerCamelCase_ ),
'label': ANY(lowerCamelCase_ ),
'box': {'xmin': ANY(lowerCamelCase_ ), 'ymin': ANY(lowerCamelCase_ ), 'xmax': ANY(lowerCamelCase_ ), 'ymax': ANY(lowerCamelCase_ )},
}, )
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def a__ (self ):
'''simple docstring'''
pass
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'hf-internal-testing/tiny-detr-mobilenetsv3'
lowerCamelCase__ : str = AutoModelForObjectDetection.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Any = ObjectDetectionPipeline(model=lowerCamelCase_, feature_extractor=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=0.0 )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
], )
lowerCamelCase__ : Dict = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
], threshold=0.0, )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
[
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
],
[
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
{'score': 0.3_376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
],
], )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'facebook/detr-resnet-50'
lowerCamelCase__ : Dict = AutoModelForObjectDetection.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = ObjectDetectionPipeline(model=lowerCamelCase_, feature_extractor=lowerCamelCase_ )
lowerCamelCase__ : Any = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
], )
lowerCamelCase__ : Tuple = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
[
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
],
[
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
],
], )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'facebook/detr-resnet-50'
lowerCamelCase__ : List[str] = pipeline('object-detection', model=lowerCamelCase_ )
lowerCamelCase__ : Tuple = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
], )
lowerCamelCase__ : Any = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
[
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
],
[
{'score': 0.9_982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9_960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9_955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
],
], )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 0.9_985
lowerCamelCase__ : Dict = 'facebook/detr-resnet-50'
lowerCamelCase__ : Optional[int] = pipeline('object-detection', model=lowerCamelCase_ )
lowerCamelCase__ : str = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=lowerCamelCase_ )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
{'score': 0.9_988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9_987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
], )
@require_torch
@require_pytesseract
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 'Narsil/layoutlmv3-finetuned-funsd'
lowerCamelCase__ : Tuple = 0.9_993
lowerCamelCase__ : str = pipeline('object-detection', model=lowerCamelCase_, threshold=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(lowerCamelCase_, decimals=4 ), [
{'score': 0.9_993, 'label': 'I-ANSWER', 'box': {'xmin': 2_9_4, 'ymin': 2_5_4, 'xmax': 3_4_3, 'ymax': 2_6_4}},
{'score': 0.9_993, 'label': 'I-ANSWER', 'box': {'xmin': 2_9_4, 'ymin': 2_5_4, 'xmax': 3_4_3, 'ymax': 2_6_4}},
], )
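# A hedged, minimal usage sketch of the API these tests exercise (assumes network
# access to the public "facebook/detr-resnet-50" checkpoint):
#
# from transformers import pipeline
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# for d in detector("http://images.cocodataset.org/val2017/000000039769.jpg"):
#     print(d["label"], round(d["score"], 3), d["box"])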
| 696 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
lowerCamelCase__ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_, 'pt', 1_2, lowerCamelCase_ )
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Optional[Any] = self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Any = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Any = self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : str = Path(lowerCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import TFBertModel
lowerCamelCase__ : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'tf' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = FeatureExtractionPipeline(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = infer_shapes(lowerCamelCase_, lowerCamelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3], lowerCamelCase_ )
self.assertSequenceEqual(variable_names[3:], lowerCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
lowerCamelCase__ : Optional[int] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowerCamelCase__ , lowerCamelCase__ : str = ensure_valid_input(FuncContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase_ ), 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase_ ), set(lowerCamelCase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase_, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase__ , lowerCamelCase__ : Any = ensure_valid_input(FuncNonContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowerCamelCase_ ), 1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens['input_ids'] )
self.assertEqual(ordered_input_names[0], 'input_ids' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
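# A hedged sketch of the export + quantize flow tested above; the positional order
# mirrors the calls in _test_export, but verify against your transformers version:
#
# from pathlib import Path
# from transformers.convert_graph_to_onnx import convert, quantize
# onnx_path = Path("onnx/bert.onnx")
# convert("pt", "bert-base-cased", onnx_path, 12, None)  # framework, model, output, opset, tokenizer
# quantized_path = quantize(onnx_path)  # returns the path of the quantized copy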
| 696 | 1 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
    lowerCamelCase__ : Optional[int] = 1 # to keep the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
lowerCamelCase__ : Optional[Any] = n - k
# Calculate C(n,k)
for i in range(_lowerCamelCase ):
result *= n - i
result //= i + 1
return result
def lowerCamelCase_ ( _lowerCamelCase ):
return binomial_coefficient(2 * node_count , _lowerCamelCase ) // (node_count + 1)
def lowerCamelCase_ ( _lowerCamelCase ):
if n < 0:
raise ValueError('factorial() not defined for negative values' )
lowerCamelCase__ : Optional[int] = 1
for i in range(1 , n + 1 ):
result *= i
return result
def lowerCamelCase_ ( _lowerCamelCase ):
return catalan_number(_lowerCamelCase ) * factorial(_lowerCamelCase )
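# Worked example, using the un-obfuscated names the __main__ block below refers to:
# catalan_number(3) = C(6, 3) // (3 + 1) = 20 // 4 = 5 distinct binary search trees,
# and binary_tree_count(3) = 5 * 3! = 30 distinct (labeled) binary trees on 3 nodes.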
if __name__ == "__main__":
A_ : Tuple = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
f"binary trees and {catalan_number(node_count)} binary search trees."
)
| 696 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
    # out_channels is double in_channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
| 696 | 1 |
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class a_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Optional[int] = pad_token_id
lowerCamelCase__ : List[str] = max_length
lowerCamelCase__ : int = vocab
lowerCamelCase__ : List[str] = merges
lowerCamelCase__ : Optional[Any] = BytePairTokenizer(lowerCamelCase_, lowerCamelCase_, sequence_length=lowerCamelCase_ )
@classmethod
def a__ (cls, lowerCamelCase_, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
        lowerCamelCase__ : List[str] = [' '.join(m ) for m in tokenizer.bpe_ranks.keys()]
lowerCamelCase__ : Optional[Any] = tokenizer.get_vocab()
return cls(lowerCamelCase_, lowerCamelCase_, *lowerCamelCase_, **lowerCamelCase_ )
@classmethod
def a__ (cls, lowerCamelCase_, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = GPTaTokenizer.from_pretrained(lowerCamelCase_, *lowerCamelCase_, **lowerCamelCase_ )
return cls.from_tokenizer(lowerCamelCase_, *lowerCamelCase_, **lowerCamelCase_ )
@classmethod
def a__ (cls, lowerCamelCase_ ):
'''simple docstring'''
return cls(**lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.tf_tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = tf.ones_like(lowerCamelCase_ )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowerCamelCase__ : int = max_length if max_length is not None else self.max_length
if max_length is not None:
lowerCamelCase__ , lowerCamelCase__ : Tuple = pad_model_inputs(
lowerCamelCase_, max_seq_length=lowerCamelCase_, pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 696 |
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 696 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 696 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
A_ : Optional[int] = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
if group not in primes:
raise ValueError('Unsupported Group' )
lowerCamelCase__ : int = primes[group]['prime']
lowerCamelCase__ : Optional[int] = primes[group]['generator']
lowerCamelCase__ : Any = int(hexlify(urandom(3_2 ) ), base=1_6 )
def a__ (self ):
'''simple docstring'''
return hex(self.__private_key )[2:]
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = pow(self.generator, self.__private_key, self.prime )
return hex(lowerCamelCase_ )[2:]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= key <= self.prime - 2
and pow(lowerCamelCase_, (self.prime - 1) // 2, self.prime ) == 1
)
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = int(lowerCamelCase_, base=1_6 )
if not self.is_valid_public_key(lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Tuple = pow(lowerCamelCase_, self.__private_key, self.prime )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
@staticmethod
    def a__ (remote_public_key_str, prime ):
        '''simple docstring'''
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime ) == 1
        )
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
lowerCamelCase__ : Dict = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[Any] = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[str] = primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(lowerCamelCase_, lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Dict = pow(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
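# Self-contained sketch of the exchange the class above implements, using a
# toy prime so the arithmetic is easy to follow (illustration only; real
# exchanges must use the RFC 3526 groups from the table at the top):
def _diffie_hellman_sketch():
    from random import randrange

    prime, generator = 23, 5  # toy parameters, never use these in practice
    a_secret = randrange(2, prime - 1 )
    b_secret = randrange(2, prime - 1 )
    a_public = pow(generator, a_secret, prime )
    b_public = pow(generator, b_secret, prime )
    # both parties derive the same shared secret from the other's public key
    assert pow(b_public, a_secret, prime ) == pow(a_public, b_secret, prime )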
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : Any = value
lowerCamelCase__ : Node | None = None # Added in order to delete a node easier
lowerCamelCase__ : Node | None = None
lowerCamelCase__ : Node | None = None
def __repr__(self ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)}, indent=1 )
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : int = root
def __str__(self ):
'''simple docstring'''
return str(self.root )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if new_children is not None: # reset its kids
lowerCamelCase__ : Tuple = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowerCamelCase_ ): # If it is the right children
lowerCamelCase__ : Any = new_children
else:
lowerCamelCase__ : Optional[Any] = new_children
else:
lowerCamelCase__ : int = new_children
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def a__ (self ):
'''simple docstring'''
return self.root is None
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = Node(lowerCamelCase_ ) # create a new Node
if self.empty(): # if Tree is empty
lowerCamelCase__ : List[str] = new_node # set its root
else: # Tree is not empty
lowerCamelCase__ : int = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCamelCase__ : List[str] = new_node # We insert the new node in a leaf
break
else:
lowerCamelCase__ : int = parent_node.left
else:
if parent_node.right is None:
lowerCamelCase__ : Optional[Any] = new_node
break
else:
lowerCamelCase__ : Tuple = parent_node.right
lowerCamelCase__ : List[str] = parent_node
def a__ (self, *lowerCamelCase_ ):
'''simple docstring'''
for value in values:
self.__insert(lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if self.empty():
            raise IndexError('Warning: Tree is empty! Please insert values before searching.' )
else:
lowerCamelCase__ : Dict = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowerCamelCase__ : Dict = node.left if value < node.value else node.right
return node
def a__ (self, lowerCamelCase_ = None ):
'''simple docstring'''
if node is None:
if self.root is None:
return None
lowerCamelCase__ : Union[str, Any] = self.root
if not self.empty():
while node.right is not None:
lowerCamelCase__ : int = node.right
return node
def a__ (self, lowerCamelCase_ = None ):
'''simple docstring'''
if node is None:
lowerCamelCase__ : List[Any] = self.root
if self.root is None:
return None
if not self.empty():
lowerCamelCase__ : int = self.root
while node.left is not None:
lowerCamelCase__ : Optional[int] = node.left
return node
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.search(lowerCamelCase_ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowerCamelCase_, lowerCamelCase_ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowerCamelCase_, node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowerCamelCase_, node.left )
else:
lowerCamelCase__ : Dict = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCamelCase__ : Optional[Any] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def a__ (self, lowerCamelCase_=None ):
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if node:
self.inorder(lowerCamelCase_, node.left )
arr.append(node.value )
self.inorder(lowerCamelCase_, node.right )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : list[int] = []
self.inorder(lowerCamelCase_, lowerCamelCase_ ) # append all values to list using inorder traversal
return arr[k - 1]
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : str = []
if curr_node is not None:
lowerCamelCase__ : Union[str, Any] = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def lowerCamelCase_ ( ):
lowerCamelCase__ : Optional[int] = (8, 3, 6, 1, 10, 14, 13, 4, 7)
lowerCamelCase__ : Union[str, Any] = BinarySearchTree()
for i in testlist:
t.insert(_lowerCamelCase )
# Prints all the elements of the list in order traversal
print(_lowerCamelCase )
if t.search(6 ) is not None:
print('The value 6 exists' )
else:
print('The value 6 doesn\'t exist' )
if t.search(-1 ) is not None:
print('The value -1 exists' )
else:
print('The value -1 doesn\'t exist' )
if not t.empty():
print('Max Value: ' , t.get_max().value ) # type: ignore
print('Min Value: ' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(_lowerCamelCase )
print(_lowerCamelCase )
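# Side note (self-contained): the k-th smallest lookup above relies on the
# BST invariant that an in-order traversal yields the keys in sorted order,
# so for the test values it matches a plain sort:
def _kth_smallest_sketch():
    values = [8, 3, 6, 1, 10, 14, 13, 4, 7]
    print(sorted(values )[2 - 1] )  # k = 2 -> 3, same answer as the tree lookup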
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 696 |
"""simple docstring"""
def lowerCamelCase_ ( mass , velocity ):
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
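# Worked example: a 10 kg body moving at 10 m/s carries
# 0.5 * 10 * 10 * 10 = 500 J, and the sign of the velocity does not matter
# because only its magnitude enters the formula:
def _kinetic_energy_example():
    assert lowerCamelCase_(10, 10 ) == 500.0
    assert lowerCamelCase_(10, -10 ) == 500.0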
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 696 | 1 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
A_ : List[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
A_ : Union[str, Any] = spec.loader.load_module()
A_ : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A_ : Optional[int] = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
A_ : str = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowerCamelCase_ ( ):
lowerCamelCase__ : Dict = []
for config_class in list(CONFIG_MAPPING.values() ):
lowerCamelCase__ : Dict = False
# source code of `config_class`
lowerCamelCase__ : str = inspect.getsource(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = _re_checkpoint.findall(_lowerCamelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
lowerCamelCase__ : Any = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
lowerCamelCase__ : Any = True
break
lowerCamelCase__ : Dict = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowerCamelCase__ : Optional[Any] = '\n'.join(sorted(_lowerCamelCase ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
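# Self-contained illustration of what the checkpoint regex above extracts
# from a docstring fragment:
def _checkpoint_regex_demo():
    pattern = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)" )
    print(pattern.findall('[bert-base-uncased](https://huggingface.co/bert-base-uncased)' ) )
    # -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]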
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 696 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : int = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
lowerCamelCase__ : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
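# Side note (illustrative): `update_from_string`, exercised in the test above,
# parses a comma-separated "key=value" string and casts each value to the type
# of the existing attribute, which is why mixed int/float/bool/str updates work:
def _update_from_string_demo():
    config = GPTaConfig()
    config.update_from_string('n_embd=769,scale_attn_weights=false' )
    assert config.n_embd == 769 and config.scale_attn_weights is False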
| 696 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
A_ : List[str] = logging.get_logger(__name__)
@dataclass
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_=False, lowerCamelCase_=False, lowerCamelCase_=6.0, lowerCamelCase_=None, lowerCamelCase_=False, lowerCamelCase_=False, lowerCamelCase_=None, lowerCamelCase_="fp4", lowerCamelCase_=False, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = load_in_abit
lowerCamelCase__ : Optional[Any] = load_in_abit
lowerCamelCase__ : Optional[Any] = llm_inta_threshold
lowerCamelCase__ : Optional[Any] = llm_inta_skip_modules
lowerCamelCase__ : Tuple = llm_inta_enable_fpaa_cpu_offload
lowerCamelCase__ : Optional[int] = llm_inta_has_fpaa_weight
lowerCamelCase__ : int = bnb_abit_quant_type
lowerCamelCase__ : List[str] = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
lowerCamelCase__ : List[str] = torch.floataa
elif isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Any = getattr(lowerCamelCase_, lowerCamelCase_ )
elif isinstance(lowerCamelCase_, torch.dtype ):
lowerCamelCase__ : Optional[int] = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def a__ (self ):
'''simple docstring'''
if not isinstance(self.llm_inta_threshold, lowerCamelCase_ ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules, lowerCamelCase_ ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload, lowerCamelCase_ ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight, lowerCamelCase_ ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype, torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type, lowerCamelCase_ ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant, lowerCamelCase_ ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def a__ (self ):
'''simple docstring'''
return self.load_in_abit or self.load_in_abit
def a__ (self ):
'''simple docstring'''
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def a__ (cls, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = cls(**lowerCamelCase_ )
lowerCamelCase__ : int = []
for key, value in kwargs.items():
if hasattr(lowerCamelCase_, lowerCamelCase_ ):
setattr(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
to_remove.append(lowerCamelCase_ )
for key in to_remove:
kwargs.pop(lowerCamelCase_, lowerCamelCase_ )
if return_unused_kwargs:
return config, kwargs
else:
return config
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
with open(lowerCamelCase_, 'w', encoding='utf-8' ) as writer:
lowerCamelCase__ : Optional[int] = self.to_dict()
lowerCamelCase__ : Optional[Any] = json.dumps(lowerCamelCase_, indent=2, sort_keys=lowerCamelCase_ ) + '\n'
writer.write(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = copy.deepcopy(self.__dict__ )
lowerCamelCase__ : Optional[Any] = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__(self ):
'''simple docstring'''
return f'''{self.__class__.__name__} {self.to_json_string()}'''
def a__ (self, lowerCamelCase_ = True ):
'''simple docstring'''
if use_diff is True:
lowerCamelCase__ : str = self.to_diff_dict()
else:
lowerCamelCase__ : int = self.to_dict()
return json.dumps(lowerCamelCase_, indent=2, sort_keys=lowerCamelCase_ ) + "\n"
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.to_dict()
# get the default config dict
lowerCamelCase__ : Union[str, Any] = BitsAndBytesConfig().to_dict()
lowerCamelCase__ : Optional[int] = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
lowerCamelCase__ : int = value
return serializable_config_dict
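# Illustrative usage sketch (assumption: the class above corresponds to the
# public `BitsAndBytesConfig`, whose real keyword names are shown here): a
# typical 4-bit NF4 configuration would be built like this.
def _bnb_config_sketch():
    import torch
    from transformers import BitsAndBytesConfig

    config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type='nf4',
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
    )
    print(config.to_json_string() )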
| 696 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Dict = value_function
lowerCamelCase__ : int = unet
lowerCamelCase__ : Union[str, Any] = scheduler
lowerCamelCase__ : int = env
lowerCamelCase__ : List[Any] = env.get_dataset()
lowerCamelCase__ : Dict = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[int] = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Tuple = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[Any] = env.observation_space.shape[0]
lowerCamelCase__ : List[str] = env.action_space.shape[0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if type(lowerCamelCase_ ) is dict:
return {k: self.to_torch(lowerCamelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase_, device=self.unet.device )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Optional[Any] = val.clone()
return x_in
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = x.shape[0]
lowerCamelCase__ : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : Dict = torch.full((batch_size,), lowerCamelCase_, device=self.unet.device, dtype=torch.long )
for _ in range(lowerCamelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : str = self.value_function(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample
lowerCamelCase__ : Union[str, Any] = torch.autograd.grad([y.sum()], [x] )[0]
lowerCamelCase__ : Optional[int] = self.scheduler._get_variance(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : Tuple = model_std * grad
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = x.detach()
lowerCamelCase__ : Dict = x + scale * grad
lowerCamelCase__ : Optional[int] = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : Tuple = self.unet(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : Optional[Any] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, predict_epsilon=lowerCamelCase_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Any = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
return x, y
def __call__(self, lowerCamelCase_, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.normalize(lowerCamelCase_, 'observations' )
lowerCamelCase__ : List[str] = obs[None].repeat(lowerCamelCase_, axis=0 )
lowerCamelCase__ : str = {0: self.to_torch(lowerCamelCase_ )}
lowerCamelCase__ : Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : List[Any] = randn_tensor(lowerCamelCase_, device=self.unet.device )
lowerCamelCase__ : int = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
# run the diffusion process
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.run_diffusion(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# sort output trajectories by value
lowerCamelCase__ : Union[str, Any] = y.argsort(0, descending=lowerCamelCase_ ).squeeze()
lowerCamelCase__ : List[str] = x[sorted_idx]
lowerCamelCase__ : Optional[Any] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : Union[str, Any] = actions.detach().cpu().numpy()
lowerCamelCase__ : Union[str, Any] = self.de_normalize(lowerCamelCase_, key='actions' )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : Optional[Any] = np.random.randint(0, lowerCamelCase_ )
lowerCamelCase__ : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
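# Illustrative sketch (assumption: the obfuscated names above correspond to
# diffusers' `ValueGuidedRLPipeline`); planning a single action for a D4RL
# environment typically looks like this:
#
#     import d4rl  # noqa: F401  (registers the offline-RL gym environments)
#     import gym
#
#     env = gym.make('hopper-medium-v2')
#     pipeline = ValueGuidedRLPipeline.from_pretrained(
#         'bglick13/hopper-medium-v2-value-function-hor32', env=env
#     )
#     obs = env.reset()
#     action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)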
| 696 | 1 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = CodeGenTokenizer
lowerCamelCase__ : Tuple = CodeGenTokenizerFast
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[str] = {'add_prefix_space': True}
lowerCamelCase__ : Union[str, Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ : Union[str, Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
lowerCamelCase__ : str = dict(zip(lowerCamelCase_, range(len(lowerCamelCase_ ) ) ) )
lowerCamelCase__ : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCamelCase__ : Optional[int] = {'unk_token': '<unk>'}
lowerCamelCase__ : List[str] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCamelCase_ ) )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = 'lower newer'
lowerCamelCase__ : int = 'lower newer'
return input_text, output_text
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
lowerCamelCase__ : Dict = 'lower newer'
lowerCamelCase__ : str = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowerCamelCase__ : Tuple = tokenizer.tokenize(lowerCamelCase_, add_prefix_space=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : str = tokens + [tokenizer.unk_token]
lowerCamelCase__ : Union[str, Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ), lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : Dict = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : str = 'lower newer'
# Testing tokenization
lowerCamelCase__ : Tuple = tokenizer.tokenize(lowerCamelCase_, add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
# Testing conversion to ids without special tokens
lowerCamelCase__ : Any = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_, add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
# Testing conversion to ids with special tokens
lowerCamelCase__ : Dict = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : Dict = tokenizer.encode(lowerCamelCase_, add_prefix_space=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
# Testing the unknown token
lowerCamelCase__ : List[str] = tokens + [rust_tokenizer.unk_token]
lowerCamelCase__ : Optional[int] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCamelCase_ ), lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
pass
def a__ (self, lowerCamelCase_=1_5 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase__ : List[str] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_, **lowerCamelCase_ )
# Simple input
lowerCamelCase__ : Optional[int] = 'This is a simple input'
lowerCamelCase__ : Any = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase__ : Tuple = ('This is a simple input', 'This is a pair')
lowerCamelCase__ : Dict = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(lowerCamelCase_, tokenizer_r.encode, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length' )
# Simple input
self.assertRaises(lowerCamelCase_, tokenizer_r.encode_plus, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length' )
# Simple input
self.assertRaises(
lowerCamelCase_, tokenizer_r.batch_encode_plus, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length', )
# Pair input
self.assertRaises(lowerCamelCase_, tokenizer_r.encode, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length' )
# Pair input
self.assertRaises(lowerCamelCase_, tokenizer_r.encode_plus, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length' )
# Pair input
self.assertRaises(
lowerCamelCase_, tokenizer_r.batch_encode_plus, lowerCamelCase_, max_length=lowerCamelCase_, padding='max_length', )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token='<pad>' )
# Simple input
lowerCamelCase__ : Optional[Any] = 'This is a simple input'
lowerCamelCase__ : Any = ['This is a simple input looooooooong', 'This is a simple input']
lowerCamelCase__ : Tuple = ('This is a simple input', 'This is a pair')
lowerCamelCase__ : List[str] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
lowerCamelCase__ : Union[str, Any] = tokenizer.pad_token_id
lowerCamelCase__ : Union[str, Any] = tokenizer(lowerCamelCase_, padding='max_length', max_length=3_0, return_tensors='np' )
lowerCamelCase__ : int = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, truncate=lowerCamelCase_, return_tensors='np' )
lowerCamelCase__ : Optional[int] = tokenizer(*lowerCamelCase_, padding='max_length', max_length=6_0, return_tensors='np' )
lowerCamelCase__ : Optional[int] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, truncate=lowerCamelCase_, return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1], 3_0 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1], 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1], 6_0 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1], 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = '$$$'
lowerCamelCase__ : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=lowerCamelCase_, add_bos_token=lowerCamelCase_ )
lowerCamelCase__ : List[str] = 'This is a simple input'
lowerCamelCase__ : Any = ['This is a simple input 1', 'This is a simple input 2']
lowerCamelCase__ : Dict = tokenizer.bos_token_id
lowerCamelCase__ : Optional[int] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer(lowerCamelCase_ )
self.assertEqual(out_s.input_ids[0], lowerCamelCase_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCamelCase__ : Tuple = tokenizer.decode(out_s.input_ids )
lowerCamelCase__ : Dict = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], lowerCamelCase_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
lowerCamelCase__ : str = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
lowerCamelCase__ : int = '\nif len_a > len_b: result = a\nelse: result = b'
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
lowerCamelCase__ : List[Any] = tokenizer.decode(lowerCamelCase_, truncate_before_pattern=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
pass
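# Side note (illustrative): the `truncate_before_pattern` behaviour exercised
# above cuts the decoded string at the first match of any given regex, which
# is how code completions are stopped at trailing comments or blank-line runs:
def _truncate_before_pattern_demo():
    tokenizer = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
    ids = tokenizer.encode('\nif a > b:\n    result = a\n# trailing comment' )
    print(tokenizer.decode(ids, truncate_before_pattern=['^#'] ) )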
| 696 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = analyze_text(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = list(' ' + ascii_lowercase )
    # total count of single characters, used to normalize the probabilities.
lowerCamelCase__ : List[Any] = sum(single_char_strings.values() )
    # first-order (single-character) entropy
lowerCamelCase__ : str = 0
    # for each letter of the alphabet, add its entropy contribution if it occurs in the text
for ch in my_alphas:
if ch in single_char_strings:
lowerCamelCase__ : Tuple = single_char_strings[ch]
lowerCamelCase__ : Union[str, Any] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowerCamelCase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # second-order (two-character) entropy
lowerCamelCase__ : Dict = sum(two_char_strings.values() )
lowerCamelCase__ : str = 0
    # for each two-character sequence, add its entropy contribution if it occurs in the text
    for cha in my_alphas:
        for chb in my_alphas:
            lowerCamelCase__ : int = cha + chb
if sequence in two_char_strings:
lowerCamelCase__ : int = two_char_strings[sequence]
lowerCamelCase__ : Tuple = int(_lowerCamelCase ) / all_sum
my_sec_sum += prob * math.loga(_lowerCamelCase )
    # print second-order entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = Counter() # type: ignore
lowerCamelCase__ : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
    # first case: the space implicitly preceding the first character.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def lowerCamelCase_ ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 696 | 1 |
"""simple docstring"""
speed_chart : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609344,
"knot": 1.852,
}
speed_chart_inverse : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.277777778,
"mph": 0.621371192,
"knot": 0.539956803,
}
def lowerCamelCase_ ( speed , unit_from , unit_to ):
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        lowerCamelCase__ : Any = (
            f'''Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'''
            f'''Valid values are: {', '.join(speed_chart_inverse )}'''
        )
        raise ValueError(lowerCamelCase__ )
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
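# Worked example: 100 km/h -> m/s multiplies by speed_chart['km/h'] (= 1.0)
# and then by speed_chart_inverse['m/s'] (= 0.277777778), giving 27.778 after
# rounding to three decimals:
def _speed_conversion_example():
    print(lowerCamelCase_(100, 'km/h', 'm/s' ) )  # -> 27.778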
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
"""simple docstring"""
import os
def lowerCamelCase_ ( ):
with open(os.path.dirname(_lowerCamelCase ) + '/p022_names.txt' ) as file:
lowerCamelCase__ : Union[str, Any] = str(file.readlines()[0] )
lowerCamelCase__ : int = names.replace('"' , '' ).split(',' )
names.sort()
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : str = 0
for i, name in enumerate(_lowerCamelCase ):
for letter in name:
name_score += ord(_lowerCamelCase ) - 64
total_score += (i + 1) * name_score
lowerCamelCase__ : Dict = 0
return total_score
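# Worked example of the scoring rule above: COLIN is worth
# 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name in sorted order it scores
# 938 * 53 = 49714 (the example from Project Euler problem 22):
def _name_score_example():
    letter_sum = sum(ord(letter ) - 64 for letter in 'COLIN' )
    print(938 * letter_sum )  # -> 49714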
if __name__ == "__main__":
print(solution())
| 696 | 1 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Tuple = logging.get_logger(__name__)
A_ : List[Any] = "https://openaipublic.azureedge.net/jukebox/models/"
A_ : Any = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def lowerCamelCase_ ( _lowerCamelCase ):
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
lowerCamelCase__ : List[Any] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
lowerCamelCase__ : int = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
lowerCamelCase__ : Optional[Any] = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
lowerCamelCase__ : Union[str, Any] = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
lowerCamelCase__ : Optional[int] = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
lowerCamelCase__ : Any = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowerCamelCase__ : Tuple = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
lowerCamelCase__ : Union[str, Any] = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
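# Quick illustration (assumption: the function above is the `replace_key`
# helper invoked in the conversion loop below): a codebook tensor name is
# rewritten like this:
#     replace_key('vqvae.bottleneck.level_blocks.0.k')
#     -> 'vqvae.bottleneck.level_blocks.0.codebook'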
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = {}
import re
lowerCamelCase__ : Optional[int] = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
lowerCamelCase__ : Tuple = re.compile(
r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
lowerCamelCase__ : List[str] = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
lowerCamelCase__ : int = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
lowerCamelCase__ : Any = re.compile(
r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
lowerCamelCase__ : int = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
lowerCamelCase__ : Union[str, Any] = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
lowerCamelCase__ : Dict = re.compile(
r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
lowerCamelCase__ : Optional[int] = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = re_encoder_block_conv_in.match(_lowerCamelCase )
lowerCamelCase__ : List[Any] = regex_match.groups()
lowerCamelCase__ : Dict = int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase__ : Any = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase__ : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
lowerCamelCase__ : int = re_encoder_block_resnet.match(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = regex_match.groups()
lowerCamelCase__ : str = int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase__ : Tuple = {'1': 1, '3': 2}[groups[-2]]
lowerCamelCase__ : Optional[int] = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
lowerCamelCase__ : Any = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase__ : int = prefix + resnet_block
lowerCamelCase__ : List[str] = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
lowerCamelCase__ : Dict = regex_match.groups()
lowerCamelCase__ : Optional[Any] = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
lowerCamelCase__ : Dict = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
lowerCamelCase__ : str = re_decoder_block_conv_out.match(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = regex_match.groups()
lowerCamelCase__ : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase__ : Optional[int] = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase__ : List[Any] = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = re_decoder_block_resnet.match(_lowerCamelCase )
lowerCamelCase__ : List[str] = regex_match.groups()
lowerCamelCase__ : Tuple = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase__ : Tuple = {'1': 1, '3': 2}[groups[-2]]
lowerCamelCase__ : Any = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
lowerCamelCase__ : List[Any] = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase__ : int = prefix + resnet_block
lowerCamelCase__ : Union[str, Any] = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
lowerCamelCase__ : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
lowerCamelCase__ : Dict = regex_match.groups()
lowerCamelCase__ : Dict = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
lowerCamelCase__ : Tuple = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = regex_match.groups()
lowerCamelCase__ : List[Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase__ : Any = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase__ : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
lowerCamelCase__ : Tuple = re_prior_cond_resnet.match(_lowerCamelCase )
lowerCamelCase__ : Any = regex_match.groups()
lowerCamelCase__ : List[str] = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase__ : Tuple = {'1': 1, '3': 2}[groups[-2]]
lowerCamelCase__ : List[str] = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
lowerCamelCase__ : Any = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase__ : Optional[Any] = prefix + resnet_block
lowerCamelCase__ : Any = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
lowerCamelCase__ : List[Any] = re_prior_cond_proj_in.match(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = regex_match.groups()
lowerCamelCase__ : List[Any] = f'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
lowerCamelCase__ : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
lowerCamelCase__ : Any = original_key
lowerCamelCase__ : str = replace_key(_lowerCamelCase )
if f'''{key_prefix}.{key}''' not in model_state_dict or key is None:
print(f'''failed converting {original_key} to {key}, does not match''' )
        # handle mismatched shape
elif value.shape != model_state_dict[f'''{key_prefix}.{key}'''].shape:
lowerCamelCase__ : Optional[int] = model_state_dict[f'''{key_prefix}.{key}''']
            print(f'''{original_key} -> {key} : \nshapes {val.shape} and {value.shape} do not match''' )
lowerCamelCase__ : Any = original_key
lowerCamelCase__ : List[Any] = original_key
lowerCamelCase__ : Optional[Any] = value
return new_dict
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase=None , _lowerCamelCase=None ):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ):
lowerCamelCase__ : Optional[int] = requests.get(f'''{PREFIX}{file}''' , allow_redirects=_lowerCamelCase )
os.makedirs(f'''{pytorch_dump_folder_path}/''' , exist_ok=_lowerCamelCase )
open(f'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , 'wb' ).write(r.content )
lowerCamelCase__ : Optional[int] = MODEL_MAPPING[model_name.split('/' )[-1]]
lowerCamelCase__ : str = JukeboxConfig.from_pretrained(_lowerCamelCase )
lowerCamelCase__ : int = JukeboxModel(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : int = {}
for i, dict_name in enumerate(_lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = torch.load(f'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['model']
lowerCamelCase__ : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
lowerCamelCase__ : Optional[Any] = old_dic[k]
elif k.endswith('.w' ):
lowerCamelCase__ : int = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowerCamelCase__ : Any = old_dic[k]
else:
lowerCamelCase__ : Any = old_dic[k]
lowerCamelCase__ : str = 'vqvae' if i == 0 else f'''priors.{3 - i}'''
lowerCamelCase__ : Optional[int] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(f'''{pytorch_dump_folder_path}/mapping.json''' , 'w' ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
A_ : Any = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 696 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = 'Speech2TextFeatureExtractor'
lowerCamelCase__ : Dict = 'Speech2TextTokenizer'
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_, **lowerCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCamelCase__ : Optional[int] = kwargs.pop('raw_speech' )
else:
lowerCamelCase__ : int = kwargs.pop('audio', lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = kwargs.pop('sampling_rate', lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = kwargs.pop('text', lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowerCamelCase__ : List[str] = args[0]
lowerCamelCase__ : Any = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCamelCase__ : Union[str, Any] = self.feature_extractor(lowerCamelCase_, *lowerCamelCase_, sampling_rate=lowerCamelCase_, **lowerCamelCase_ )
if text is not None:
lowerCamelCase__ : List[Any] = self.tokenizer(lowerCamelCase_, **lowerCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ : Tuple = encodings['input_ids']
return inputs
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@contextmanager
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call).' )
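        # Deprecated pattern this context manager still supports (sketch):
        #     with processor.as_target_processor():
        #         labels = processor(text_batch).input_ids
        # The preferred replacement is a single processor(audio=..., text=...) call.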
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = self.tokenizer
yield
lowerCamelCase__ : Optional[int] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
| 696 | 1 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
A_ : Union[str, Any] = {"UserAgent": UserAgent().random}
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : int = script.contents[0]
lowerCamelCase__ : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
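# The profile page embeds its data as JSON inside a <script> tag, roughly of the
# form 'window._sharedData = {"config": ..., "entry_data": ...};' (shape assumed
# from the slicing above): data.find('{"config"') locates the opening brace, the
# [:-1] slice drops the trailing ';', and json.loads parses what remains.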
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = f'''https://www.instagram.com/{username}/'''
lowerCamelCase__ : Dict = self.get_json()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = requests.get(self.url, headers=lowerCamelCase_ ).text
lowerCamelCase__ : str = BeautifulSoup(lowerCamelCase_, 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__(self ):
'''simple docstring'''
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__(self ):
'''simple docstring'''
return f'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["username"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def a__ (self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase_ ( _lowerCamelCase = "github" ):
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
lowerCamelCase__ : Dict = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 12_0000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : List[Any] = InstagramUser("github")
print(instagram_user)
print(f"{instagram_user.number_of_posts = }")
print(f"{instagram_user.number_of_followers = }")
print(f"{instagram_user.number_of_followings = }")
print(f"{instagram_user.email = }")
print(f"{instagram_user.website = }")
print(f"{instagram_user.profile_picture_url = }")
print(f"{instagram_user.is_verified = }")
print(f"{instagram_user.is_private = }")
| 696 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[Any] = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Optional[int] = embedding_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = config_and_inputs
lowerCamelCase__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase_ )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileBertModelTester(self )
lowerCamelCase__ : List[str] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
return torch.tensor(
_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase , )
A_ : Tuple = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[int] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
], device=lowerCamelCase_, )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference; it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
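        # Worked example with TOLERANCE = 1e-3: an expected value of 1.0e8 against an
        # output of 1.00005e8 gives a ratio of ~0.99995, inside [0.999, 1.001], so the
        # check passes even though the absolute difference is 5e3.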
lowerCamelCase__ : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase__ : Any = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 696 | 1 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : int = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
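# Every value above deliberately differs from the corresponding PretrainedConfig
# default; one of the tests below fails if an entry still matches its default or
# if a new PretrainedConfig attribute is missing from this mapping.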
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
lowerCamelCase__ : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
| 696 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A_ : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=1 ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer
lowerCamelCase__ : Optional[Any] = dataset
lowerCamelCase__ : int = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
lowerCamelCase__ : Any = n_copies
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = start_length
lowerCamelCase__ : List[str] = eof_strings
lowerCamelCase__ : List[str] = tokenizer
def __call__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
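# Assuming the criterion is instantiated with the EOF_STRINGS above, generation for
# a batch halts only once every decoded continuation (the tokens past `start_length`)
# contains at least one stop marker such as '\nclass' or '\ndef'.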
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = re.split('(%s)' % '|'.join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ):
lowerCamelCase__ : List[str] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
lowerCamelCase__ : str = batch['ids'].shape[-1]
lowerCamelCase__ : int = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
lowerCamelCase__ : Optional[Any] = batch['task_id'].repeat(_lowerCamelCase )
lowerCamelCase__ : List[Any] = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ : List[Any] = generated_tokens.cpu().numpy()
lowerCamelCase__ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
lowerCamelCase__ : str = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ : Optional[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def lowerCamelCase_ ( ):
# Setup configuration
lowerCamelCase__ : int = HfArgumentParser(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ : Tuple = 'false'
if args.num_workers is None:
lowerCamelCase__ : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ : Optional[int] = tokenizer.eos_token
lowerCamelCase__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ : Any = load_dataset('openai_humaneval' )
lowerCamelCase__ : Optional[int] = load_metric('code_eval' )
lowerCamelCase__ : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ : Optional[int] = args.n_samples // args.batch_size
lowerCamelCase__ : Tuple = TokenizedDataset(_lowerCamelCase , human_eval['test'] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ : Union[str, Any] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ : List[Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
            'Code evaluation not enabled. Read the warning below carefully and then use the `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ : str = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
lowerCamelCase__ : List[str] = []
for task in tqdm(range(_lowerCamelCase ) ):
lowerCamelCase__ : int = human_eval['test'][task]['test']
lowerCamelCase__ : Union[str, Any] = f'''check({human_eval['test'][task]['entry_point']})'''
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ : Any = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 696 | 1 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ = 1_3, lowerCamelCase_ = 6_4, lowerCamelCase_ = 2, lowerCamelCase_ = 3, lowerCamelCase_ = 3, lowerCamelCase_ = True, lowerCamelCase_ = True, lowerCamelCase_ = 1_2_8, lowerCamelCase_=[1_6, 3_2, 6_4, 1_2_8], lowerCamelCase_ = 7, lowerCamelCase_ = 4, lowerCamelCase_ = 3_7, lowerCamelCase_ = "gelu", lowerCamelCase_ = 0.1, lowerCamelCase_ = 0.1, lowerCamelCase_ = 1_0, lowerCamelCase_ = 0.02, lowerCamelCase_ = 2, lowerCamelCase_ = 1, lowerCamelCase_ = 1_2_8, lowerCamelCase_ = [2, 2, 2, 2], lowerCamelCase_ = 2, lowerCamelCase_ = 2, ):
'''simple docstring'''
lowerCamelCase__ : Tuple = parent
lowerCamelCase__ : Any = batch_size
lowerCamelCase__ : Union[str, Any] = image_size
lowerCamelCase__ : Optional[Any] = patch_size
lowerCamelCase__ : Dict = num_channels
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Any = hidden_size
lowerCamelCase__ : List[Any] = num_hidden_layers
lowerCamelCase__ : Union[str, Any] = num_attention_heads
lowerCamelCase__ : str = intermediate_size
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Optional[Any] = type_sequence_label_size
lowerCamelCase__ : List[str] = initializer_range
lowerCamelCase__ : Optional[int] = encoder_stride
lowerCamelCase__ : Dict = num_attention_outputs
lowerCamelCase__ : Union[str, Any] = embed_dim
lowerCamelCase__ : Tuple = embed_dim + 1
lowerCamelCase__ : Any = resolution
lowerCamelCase__ : str = depths
lowerCamelCase__ : Optional[int] = hidden_sizes
lowerCamelCase__ : Union[str, Any] = dim
lowerCamelCase__ : Any = mlp_expansion_ratio
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
if self.use_labels:
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def a__ (self ):
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = TFEfficientFormerModel(config=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = model(lowerCamelCase_, training=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.type_sequence_label_size
lowerCamelCase__ : List[Any] = TFEfficientFormerForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : int = model(lowerCamelCase_, labels=lowerCamelCase_, training=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : List[Any] = TFEfficientFormerForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = config_and_inputs
lowerCamelCase__ : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : Any = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCamelCase__ : int = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : List[str] = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = TFEfficientFormerModelTester(self )
lowerCamelCase__ : Tuple = ConfigTester(
self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[Any] = model_class(lowerCamelCase_ )
lowerCamelCase__ : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Optional[int] = [*signature.parameters.keys()]
lowerCamelCase__ : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Any = model_class(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ), training=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase__ : Tuple = getattr(
self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCamelCase_ ), lowerCamelCase_ )
if hasattr(self.model_tester, 'encoder_seq_length' ):
lowerCamelCase__ : Any = self.model_tester.encoder_seq_length
if hasattr(self.model_tester, 'chunk_length' ) and self.model_tester.chunk_length > 1:
lowerCamelCase__ : Union[str, Any] = seq_length * self.model_tester.chunk_length
else:
lowerCamelCase__ : str = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
if config.is_encoder_decoder:
lowerCamelCase__ : int = outputs.decoder_hidden_states
                self.assertIsInstance(lowerCamelCase_, (list, tuple) )
self.assertEqual(len(lowerCamelCase_ ), lowerCamelCase_ )
lowerCamelCase__ : Tuple = getattr(self.model_tester, 'seq_length', lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = getattr(self.model_tester, 'decoder_seq_length', lowerCamelCase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ), [decoder_seq_length, self.model_tester.hidden_size], )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = True
check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
            # check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Union[str, Any] = True
check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Dict = TFEfficientFormerModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : int = True
lowerCamelCase__ : str = getattr(self.model_tester, 'seq_length', lowerCamelCase_ )
lowerCamelCase__ : int = getattr(self.model_tester, 'encoder_seq_length', lowerCamelCase_ )
lowerCamelCase__ : List[str] = getattr(self.model_tester, 'key_length', lowerCamelCase_ )
lowerCamelCase__ : List[Any] = getattr(self.model_tester, 'chunk_length', lowerCamelCase_ )
if chunk_length is not None and hasattr(self.model_tester, 'num_hashes' ):
lowerCamelCase__ : Tuple = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowerCamelCase__ : int = True
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[str] = model_class(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ), training=lowerCamelCase_ )
lowerCamelCase__ : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase_ ), self.model_tester.num_attention_outputs )
            # check that output_attentions also works using config
del inputs_dict["output_attentions"]
lowerCamelCase__ : Dict = True
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ), training=lowerCamelCase_ )
lowerCamelCase__ : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase_ ), self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowerCamelCase__ : Tuple = model_class(lowerCamelCase_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowerCamelCase__ : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=lowerCamelCase_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowerCamelCase__ : Dict = model(lowerCamelCase_ )
self.assertTrue(outputs_dict is not None )
def lowerCamelCase_ ( ):
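# Shared fixture: load the local COCO sample image used by the integration tests below.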
lowerCamelCase__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
lowerCamelCase__ : List[str] = self.default_image_processor
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : List[Any] = image_processor(images=lowerCamelCase_, return_tensors='tf' )
# forward pass
lowerCamelCase__ : List[Any] = model(**lowerCamelCase_, training=lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : Optional[int] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Tuple = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
lowerCamelCase__ : Any = self.default_image_processor
lowerCamelCase__ : List[str] = prepare_img()
lowerCamelCase__ : int = image_processor(images=lowerCamelCase_, return_tensors='tf' )
# forward pass
lowerCamelCase__ : Optional[Any] = model(**lowerCamelCase_, training=lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : Union[str, Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
| 696 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
| 696 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ : List[str] = {
"configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
"tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
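# Each optional backend below is probed with a try/except; its symbols are only
# registered in the lazy import structure when the dependency is installed.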
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] = [
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = [
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] = [
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
A_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 696 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
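# Iteratively multiply 1..num to compute num!.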
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
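# Sum the decimal digits of the given number.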
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
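# Digit sum of num!; e.g. solution(100) returns the sum of the digits of 100!.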
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 696 | 1 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
A_ : List[str] = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def lowerCamelCase_ ( ):
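# Authenticate with the GitHub API token from the environment and scan every open issue.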
lowerCamelCase__ : List[str] = Github(os.environ['GITHUB_TOKEN'] )
lowerCamelCase__ : Tuple = g.get_repo('huggingface/diffusers' )
lowerCamelCase__ : List[str] = repo.get_issues(state='open' )
for issue in open_issues:
lowerCamelCase__ : Dict = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
lowerCamelCase__ : Union[str, Any] = comments[0] if len(_lowerCamelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 696 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Union[str, Any] = "tf"
else:
A_ : List[str] = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda t : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCamelCase_ )
self.assertIn('attention_mask', lowerCamelCase_ )
self.assertNotIn('decoder_input_ids', lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : int = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : str = tokenizer(
text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : List[str] = json.load(lowerCamelCase_ )
lowerCamelCase__ : Any = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
lowerCamelCase_, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCamelCase_ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
lowerCamelCase_, additional_special_tokens=lowerCamelCase_, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizers(fast=lowerCamelCase_, do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Tuple = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCamelCase__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
| 696 | 1 |
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
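# Decode an arbitrary audio byte buffer with ffmpeg into mono float32 PCM at the requested sampling rate.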
lowerCamelCase__ : Optional[int] = f'''{sampling_rate}'''
lowerCamelCase__ : Union[str, Any] = '1'
lowerCamelCase__ : List[Any] = 'f32le'
lowerCamelCase__ : Optional[int] = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(_lowerCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
lowerCamelCase__ : Union[str, Any] = ffmpeg_process.communicate(_lowerCamelCase )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
lowerCamelCase__ : Optional[Any] = output_stream[0]
lowerCamelCase__ : str = np.frombuffer(_lowerCamelCase , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "f32le" , ):
lowerCamelCase__ : Union[str, Any] = f'''{sampling_rate}'''
lowerCamelCase__ : Dict = '1'
if format_for_conversion == "s16le":
lowerCamelCase__ : Optional[int] = 2
elif format_for_conversion == "f32le":
lowerCamelCase__ : Any = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowerCamelCase__ : int = platform.system()
if system == "Linux":
lowerCamelCase__ : Optional[int] = 'alsa'
lowerCamelCase__ : str = 'default'
elif system == "Darwin":
lowerCamelCase__ : Any = 'avfoundation'
lowerCamelCase__ : Tuple = ':0'
elif system == "Windows":
lowerCamelCase__ : Any = 'dshow'
lowerCamelCase__ : List[Any] = 'default'
lowerCamelCase__ : Any = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
lowerCamelCase__ : Union[str, Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCamelCase__ : str = _ffmpeg_stream(_lowerCamelCase , _lowerCamelCase )
for item in iterator:
yield item
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "f32le" , ):
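# Stream overlapping microphone chunks; the stride overlap gives downstream models context
# on both sides of each window, and chunks that fall too far behind real time are dropped.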
if stream_chunk_s is not None:
lowerCamelCase__ : Any = stream_chunk_s
else:
lowerCamelCase__ : List[str] = chunk_length_s
lowerCamelCase__ : Any = ffmpeg_microphone(_lowerCamelCase , _lowerCamelCase , format_for_conversion=_lowerCamelCase )
if format_for_conversion == "s16le":
lowerCamelCase__ : int = np.intaa
lowerCamelCase__ : int = 2
elif format_for_conversion == "f32le":
lowerCamelCase__ : Optional[Any] = np.floataa
lowerCamelCase__ : Any = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowerCamelCase__ : Optional[Any] = chunk_length_s / 6
lowerCamelCase__ : Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(_lowerCamelCase , (int, float) ):
lowerCamelCase__ : List[Any] = [stride_length_s, stride_length_s]
lowerCamelCase__ : Tuple = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowerCamelCase__ : Any = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowerCamelCase__ : Tuple = datetime.datetime.now()
lowerCamelCase__ : int = datetime.timedelta(seconds=_lowerCamelCase )
for item in chunk_bytes_iter(_lowerCamelCase , _lowerCamelCase , stride=(stride_left, stride_right) , stream=_lowerCamelCase ):
# Put everything back in numpy scale
lowerCamelCase__ : Optional[int] = np.frombuffer(item['raw'] , dtype=_lowerCamelCase )
lowerCamelCase__ : Tuple = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
lowerCamelCase__ : Optional[int] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ):
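# Re-chunk an iterator of raw bytes into fixed-size windows with (left, right) stride overlap.
# For example, with chunk_len=4 and stride=(1, 1), an input of b"abcdef" yields
# {"raw": b"abcd", "stride": (0, 1)}, then {"raw": b"cdef", "stride": (1, 1)},
# and finally the tail {"raw": b"ef", "stride": (1, 0)}.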
lowerCamelCase__ : Tuple = b''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
lowerCamelCase__ : int = 0
for raw in iterator:
acc += raw
if stream and len(_lowerCamelCase ) < chunk_len:
lowerCamelCase__ : Dict = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(_lowerCamelCase ) >= chunk_len:
# We are flushing the accumulator
lowerCamelCase__ : str = (_stride_left, stride_right)
lowerCamelCase__ : List[Any] = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
lowerCamelCase__ : List[Any] = False
yield item
lowerCamelCase__ : List[Any] = stride_left
lowerCamelCase__ : Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(_lowerCamelCase ) > stride_left:
lowerCamelCase__ : Optional[Any] = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
lowerCamelCase__ : Any = False
yield item
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
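# Spawn the ffmpeg subprocess and yield its stdout in large buffered reads until EOF.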
lowerCamelCase__ : int = 2**24 # 16Mo
try:
with subprocess.Popen(_lowerCamelCase , stdout=subprocess.PIPE , bufsize=_lowerCamelCase ) as ffmpeg_process:
while True:
lowerCamelCase__ : Union[str, Any] = ffmpeg_process.stdout.read(_lowerCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
| 696 |
"""simple docstring"""
from math import pi, sqrt, tan
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if radius_a < 0 or radius_b < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCamelCase__ : Any = (height**2 + (radius_a - radius_b) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_b)) + radius_a**2 + radius_b**2)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if sidea < 0 or sideb < 0 or sidec < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sideb < sidec or sideb + sidec < sidea or sidea + sidec < sideb:
raise ValueError('Given three sides do not form a triangle' )
lowerCamelCase__ : Dict = (sidea + sideb + sidec) / 2
lowerCamelCase__ : str = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sideb)
* (semi_perimeter - sidec) )
return area
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if basea < 0 or baseb < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + baseb) * height
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if diagonal_a < 0 or diagonal_b < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_b
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if not isinstance(sides , int ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 696 | 1 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCamelCase__ : List[Any] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = -1
lowerCamelCase__ : str = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
lowerCamelCase__ : List[str] = model.generate(lowerCamelCase_, max_new_tokens=1_0, do_sample=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase__ : Union[str, Any] = TextStreamer(lowerCamelCase_ )
model.generate(lowerCamelCase_, max_new_tokens=1_0, do_sample=lowerCamelCase_, streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase__ : Tuple = cs.out[:-1]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCamelCase__ : List[Any] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = -1
lowerCamelCase__ : Union[str, Any] = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model.generate(lowerCamelCase_, max_new_tokens=1_0, do_sample=lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.decode(greedy_ids[0] )
lowerCamelCase__ : List[str] = TextIteratorStreamer(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {'input_ids': input_ids, 'max_new_tokens': 1_0, 'do_sample': False, 'streamer': streamer}
lowerCamelCase__ : Optional[Any] = Thread(target=model.generate, kwargs=lowerCamelCase_ )
thread.start()
lowerCamelCase__ : List[str] = ''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCamelCase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = -1
lowerCamelCase__ : List[Any] = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = model.generate(lowerCamelCase_, max_new_tokens=1_0, do_sample=lowerCamelCase_ )
lowerCamelCase__ : Tuple = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase__ : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase__ : Tuple = TextStreamer(lowerCamelCase_, skip_prompt=lowerCamelCase_ )
model.generate(lowerCamelCase_, max_new_tokens=1_0, do_sample=lowerCamelCase_, streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase__ : Optional[int] = cs.out[:-1]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = AutoTokenizer.from_pretrained('distilgpt2' )
lowerCamelCase__ : str = AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = -1
lowerCamelCase__ : Tuple = torch.ones((1, 5), device=lowerCamelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase__ : Optional[Any] = TextStreamer(lowerCamelCase_, skip_special_tokens=lowerCamelCase_ )
model.generate(lowerCamelCase_, max_new_tokens=1, do_sample=lowerCamelCase_, streamer=lowerCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase__ : Tuple = cs.out[:-1] # Remove the final "\n"
lowerCamelCase__ : Dict = tokenizer(lowerCamelCase_, return_tensors='pt' )
self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCamelCase__ : Any = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = -1
lowerCamelCase__ : List[str] = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
lowerCamelCase__ : int = TextIteratorStreamer(lowerCamelCase_, timeout=0.001 )
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'max_new_tokens': 1_0, 'do_sample': False, 'streamer': streamer}
lowerCamelCase__ : Any = Thread(target=model.generate, kwargs=lowerCamelCase_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(Empty ):
lowerCamelCase__ : Union[str, Any] = ''
for new_text in streamer:
streamer_text += new_text
| 696 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
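# Two prompts: one inside the original context window and one ~1.5x beyond it.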
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
| 696 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
# Construct model
if gpta_config_file == "":
lowerCamelCase__ : Any = GPTaConfig()
else:
lowerCamelCase__ : Any = GPTaConfig.from_json_file(_lowerCamelCase )
lowerCamelCase__ : Tuple = GPTaModel(_lowerCamelCase )
# Load weights from numpy
load_tf_weights_in_gpta(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
lowerCamelCase__ : List[str] = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
lowerCamelCase__ : int = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() , _lowerCamelCase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
A_ : Optional[int] = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 696 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
A_ : List[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
A_ : Union[str, Any] = spec.loader.load_module()
A_ : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A_ : Optional[int] = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
A_ : str = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowerCamelCase_ ( ):
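# Collect config classes whose docstring does not contain a checkpoint link matching the checkpoint name.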
lowerCamelCase__ : Dict = []
for config_class in list(CONFIG_MAPPING.values() ):
lowerCamelCase__ : Dict = False
# source code of `config_class`
lowerCamelCase__ : str = inspect.getsource(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = _re_checkpoint.findall(_lowerCamelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
lowerCamelCase__ : Any = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
lowerCamelCase__ : Any = True
break
lowerCamelCase__ : Dict = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowerCamelCase__ : Optional[Any] = '\n'.join(sorted(_lowerCamelCase ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 696 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    # If the two equal-length bit strings differ in exactly one position,
    # return the merged implicant with '_' at that position, otherwise False.
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    # Repeatedly attempt pairwise merges; entries still marked '$' after a pass
    # are collected as prime implicants.
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
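# Illustrative behaviour: decimal_to_binary(3, [5]) -> ['101'], i.e. each integer
# minterm is rendered as a bit string of width no_of_variable.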
def is_for_table(string1: str, string2: str, count: int) -> bool:
    # True when the two strings differ in exactly `count` positions.
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # First pass: pick essential prime implicants (sole cover of some minterm).
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Second pass: greedily cover the remaining minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
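# chart[i][j] is 1 when prime implicant i covers minterm j, e.g.
# prime_implicant_chart(['1_1'], ['101', '111']) -> [[1, 1]].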
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 696 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
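# Sketch of the intended lazy behaviour (import cost is paid on first attribute access):
#   from transformers.models.llama import LlamaConfig  # loads only configuration_llama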
| 696 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")
    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
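# Illustrative counts: analyze_text("abb") gives single characters {'b': 2, 'a': 1}
# and pairs {' a': 1, 'ab': 1, 'bb': 1} (the leading space pairs with the first char).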
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 696 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
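# Usage sketch (script name assumed): python crawl_google_results.py "python tutorials"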
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ : List[str] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = StableDiffusionInstructPixaPixPipeline
lowerCamelCase__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
lowerCamelCase__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCamelCase__ : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=8, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=3_2, )
lowerCamelCase__ : Tuple = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
torch.manual_seed(0 )
lowerCamelCase__ : Dict = AutoencoderKL(
block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
torch.manual_seed(0 )
lowerCamelCase__ : List[Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, )
lowerCamelCase__ : int = CLIPTextModel(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase__ : Tuple = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : Tuple = floats_tensor((1, 3, 3_2, 3_2), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Any = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : Any = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Dict = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : Tuple = self.get_dummy_components()
lowerCamelCase__ : str = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase_ )
lowerCamelCase__ : Tuple = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Any = sd_pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase__ : List[str] = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
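        # The hard-coded expected_slice values serve as regression references for this
        # seed/config; the 1e-3 tolerance absorbs minor numerical drift across versions.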
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : Any = self.get_dummy_components()
lowerCamelCase__ : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Tuple = self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : int = 'french fries'
lowerCamelCase__ : str = sd_pipe(**lowerCamelCase_, negative_prompt=lowerCamelCase_ )
lowerCamelCase__ : List[str] = output.images
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase__ : Optional[Any] = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : Dict = self.get_dummy_components()
lowerCamelCase__ : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase_ )
lowerCamelCase__ : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = [inputs['prompt']] * 2
lowerCamelCase__ : Any = np.array(inputs['image'] ).astype(np.floataa ) / 255.0
lowerCamelCase__ : List[Any] = torch.from_numpy(lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image / 2 + 0.5
lowerCamelCase__ : Union[str, Any] = image.permute(0, 3, 1, 2 )
lowerCamelCase__ : int = image.repeat(2, 1, 1, 1 )
lowerCamelCase__ : str = sd_pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : Optional[int] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
lowerCamelCase__ : Union[str, Any] = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : Dict = self.get_dummy_components()
lowerCamelCase__ : Optional[int] = EulerAncestralDiscreteScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear' )
lowerCamelCase__ : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase_ )
lowerCamelCase__ : List[Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Tuple = self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : List[str] = sd_pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCamelCase__ : List[Any] = [round(lowerCamelCase_, 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(lowerCamelCase_ ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase__ : Optional[int] = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def a__ (self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.get_dummy_components()
lowerCamelCase__ : Tuple = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase_ )
lowerCamelCase__ : int = VaeImageProcessor(do_resize=lowerCamelCase_, do_normalize=lowerCamelCase_ )
lowerCamelCase__ : Tuple = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe(**self.get_dummy_inputs_by_type(lowerCamelCase_, input_image_type='pt' ) )[0]
lowerCamelCase__ : Union[str, Any] = components['vae']
lowerCamelCase__ : Optional[Any] = self.get_dummy_inputs_by_type(lowerCamelCase_, input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowerCamelCase__ : int = vae.encode(inputs[image_param] ).latent_dist.mode()
lowerCamelCase__ : Any = pipe(**lowerCamelCase_ )[0]
lowerCamelCase__ : str = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCamelCase_, 1e-4, 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = load_image(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
lowerCamelCase__ : Optional[int] = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase__ : List[str] = self.get_inputs()
lowerCamelCase__ : Dict = pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : str = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase__ : List[str] = self.get_inputs()
lowerCamelCase__ : Dict = pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase__ : List[Any] = self.get_inputs()
lowerCamelCase__ : List[Any] = pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Tuple = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = 0
def callback_fn(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ) -> None:
lowerCamelCase__ : Tuple = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase__ : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCamelCase__ : Union[str, Any] = latents[0, -3:, -3:, -1]
lowerCamelCase__ : Union[str, Any] = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCamelCase__ : List[str] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCamelCase__ : int = latents[0, -3:, -3:, -1]
lowerCamelCase__ : int = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCamelCase_, torch_dtype=torch.floataa )
lowerCamelCase__ : Optional[int] = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase__ : Dict = self.get_inputs()
pipe(**lowerCamelCase_, callback=lowerCamelCase_, callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def a__ (self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCamelCase_, torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ : int = self.get_inputs()
lowerCamelCase__ : Union[str, Any] = pipe(**lowerCamelCase_ )
lowerCamelCase__ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase__ : Tuple = inputs['image'].resize((5_0_4, 5_0_4) )
lowerCamelCase__ : Optional[Any] = 'timbrooks/instruct-pix2pix'
lowerCamelCase__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCamelCase_, safety_checker=lowerCamelCase_, )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase__ : Dict = pipe(**lowerCamelCase_ )
lowerCamelCase__ : List[Any] = output.images[0]
lowerCamelCase__ : List[str] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
lowerCamelCase__ : int = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 696 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
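# The slow tests below follow the standard JAX data-parallel recipe: replicate the
# pipeline params across devices, split the PRNG key per device, and shard the inputs.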
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
        # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCamelCase__ , lowerCamelCase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : Any = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 696 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
'''simple docstring'''
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
lowerCamelCase__ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_, 'pt', 1_2, lowerCamelCase_ )
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Optional[Any] = self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Any = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Any = self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : str = Path(lowerCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import TFBertModel
lowerCamelCase__ : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'tf' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = FeatureExtractionPipeline(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = infer_shapes(lowerCamelCase_, lowerCamelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3], lowerCamelCase_ )
self.assertSequenceEqual(variable_names[3:], lowerCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
lowerCamelCase__ : Optional[int] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowerCamelCase__ , lowerCamelCase__ : str = ensure_valid_input(FuncContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase_ ), 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase_ ), set(lowerCamelCase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase_, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase__ , lowerCamelCase__ : Any = ensure_valid_input(FuncNonContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowerCamelCase_ ), 1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens['input_ids'] )
self.assertEqual(ordered_input_names[0], 'input_ids' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
| 696 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
| 696 | 1 |
"""simple docstring"""
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
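# The loop above iterates a Pell-like recurrence; each step emits the perimeter of the
# next qualifying triangle (2 * value + 2 or 2 * value - 2, alternating with i).
# Interpretation assumed from the structure of the recurrence.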
if __name__ == "__main__":
print(f"{solution() = }")
| 696 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
'''simple docstring'''
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
lowerCamelCase__ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_, 'pt', 1_2, lowerCamelCase_ )
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Optional[Any] = self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Any = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Any = self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : str = Path(lowerCamelCase_ ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import TFBertModel
lowerCamelCase__ : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'tf' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = FeatureExtractionPipeline(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = infer_shapes(lowerCamelCase_, lowerCamelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3], lowerCamelCase_ )
self.assertSequenceEqual(variable_names[3:], lowerCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
lowerCamelCase__ : Optional[int] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowerCamelCase__ , lowerCamelCase__ : str = ensure_valid_input(FuncContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase_ ), 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase_ ), set(lowerCamelCase_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase_, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowerCamelCase__ , lowerCamelCase__ : Any = ensure_valid_input(FuncNonContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(lowerCamelCase_ ), 1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens['input_ids'] )
self.assertEqual(ordered_input_names[0], 'input_ids' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
| 696 | 1 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
A_ : int = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
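# Example invocation (paths are illustrative placeholders):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./transfo-xl/model.ckpt \
#       --pytorch_dump_folder_path ./transfo-xl-pytorch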
| 696 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
            # out_channels doubles in_channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
| 696 | 1 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=6_4, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, lowerCamelCase_=2, lowerCamelCase_=2, lowerCamelCase_=2, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=1, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : List[str] = seq_length
lowerCamelCase__ : Optional[int] = is_training
lowerCamelCase__ : Union[str, Any] = use_input_mask
lowerCamelCase__ : str = use_token_type_ids
lowerCamelCase__ : str = use_labels
lowerCamelCase__ : str = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : List[Any] = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_attention_heads
lowerCamelCase__ : List[str] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : int = attention_probs_dropout_prob
lowerCamelCase__ : Optional[int] = max_position_embeddings
lowerCamelCase__ : Optional[Any] = type_vocab_size
lowerCamelCase__ : List[str] = type_sequence_label_size
lowerCamelCase__ : str = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Optional[int] = num_choices
lowerCamelCase__ : List[Any] = scope
lowerCamelCase__ : List[Any] = q_groups
lowerCamelCase__ : int = k_groups
lowerCamelCase__ : List[Any] = v_groups
lowerCamelCase__ : List[Any] = post_attention_groups
lowerCamelCase__ : str = intermediate_groups
lowerCamelCase__ : List[str] = output_groups
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[int] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : Optional[Any] = None
if self.use_labels:
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = SqueezeBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : str = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = SqueezeBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Any = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = SqueezeBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.num_labels
lowerCamelCase__ : Union[str, Any] = SqueezeBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Dict = SqueezeBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.num_choices
lowerCamelCase__ : Tuple = SqueezeBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[str] = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Any = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : Union[str, Any] = config_and_inputs
lowerCamelCase__ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : Any = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[int] = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = SqueezeBertModelTester(self )
lowerCamelCase__ : int = ConfigTester(self, config_class=lowerCamelCase_, dim=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCamelCase_ )
@slow
def a__ (self ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Dict = SqueezeBertModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
lowerCamelCase__ : int = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
lowerCamelCase__ : List[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Dict = torch.Size((1, 3) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Dict = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] )
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-4 ) )
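# A hedged sketch (not part of the test suite): the integration test above feeds
# hand-built input ids; the same checkpoint is normally paired with its tokenizer.
# Network access and the 'squeezebert/squeezebert-mnli' weights are assumed.
def _example_mnli_inference():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('squeezebert/squeezebert-mnli')
    model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')
    # MNLI scores a premise/hypothesis pair against 3 classes.
    inputs = tokenizer('A soccer game with multiple males playing.', 'Some men are playing a sport.', return_tensors='pt')
    with torch.no_grad():
        logits = model(**inputs).logits
    return logits.argmax(dim=-1)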
| 696 |
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
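# A hedged sketch (not in the original module): how a pin table like `A_` above
# is typically consumed by setup tooling: mapping bare package names back to
# their pinned requirement strings.
def _deps_list(*pkgs):
    return [A_[pkg] for pkg in pkgs]
# _deps_list('torch', 'tqdm') -> ['torch>=1.9,!=1.12.0', 'tqdm>=4.27']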
| 696 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = 0.00
lowerCamelCase__ : int = 0
for resistor in resistors:
if resistor <= 0:
lowerCamelCase__ : Union[str, Any] = f'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(_lowerCamelCase )
first_sum += 1 / float(_lowerCamelCase )
index += 1
return 1 / first_sum
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : str = 0.00
lowerCamelCase__ : Optional[int] = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowerCamelCase__ : Optional[int] = f'''Resistor at index {index} has a negative value!'''
raise ValueError(_lowerCamelCase )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
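    # A hedged sketch (not in the original): the two formulas above applied to a
    # concrete network of 2, 4 and 4 ohm resistors.
    example = [2.0, 4.0, 4.0]
    print(sum(example))  # series: 10.0 ohms
    print(1 / sum(1 / r for r in example))  # parallel: 1.0 ohm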
| 696 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
A_ : Optional[int] = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
if group not in primes:
raise ValueError('Unsupported Group' )
lowerCamelCase__ : int = primes[group]['prime']
lowerCamelCase__ : Optional[int] = primes[group]['generator']
lowerCamelCase__ : Any = int(hexlify(urandom(3_2 ) ), base=1_6 )
def a__ (self ):
'''simple docstring'''
return hex(self.__private_key )[2:]
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = pow(self.generator, self.__private_key, self.prime )
return hex(lowerCamelCase_ )[2:]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= key <= self.prime - 2
and pow(lowerCamelCase_, (self.prime - 1) // 2, self.prime ) == 1
)
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = int(lowerCamelCase_, base=1_6 )
if not self.is_valid_public_key(lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Tuple = pow(lowerCamelCase_, self.__private_key, self.prime )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (
2 <= remote_public_key_str <= prime - 2
and pow(lowerCamelCase_, (prime - 1) // 2, lowerCamelCase_ ) == 1
)
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = 1_4 ):
'''simple docstring'''
lowerCamelCase__ : Dict = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[Any] = int(lowerCamelCase_, base=1_6 )
lowerCamelCase__ : List[str] = primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(lowerCamelCase_, lowerCamelCase_ ):
raise ValueError('Invalid public key' )
lowerCamelCase__ : Dict = pow(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
return shaaaa(str(lowerCamelCase_ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
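    # A hedged sketch (not in the original): the exchange the class implements,
    # written out with explicit modular exponentiation over the group-14 MODP prime.
    from secrets import randbelow

    p, g = A_[14]['prime'], A_[14]['generator']
    a, b = 2 + randbelow(p - 3), 2 + randbelow(p - 3)  # each party's private key
    pub_a, pub_b = pow(g, a, p), pow(g, b, p)  # exchanged public keys
    assert pow(pub_b, a, p) == pow(pub_a, b, p)  # both sides derive the same secret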
| 696 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ : str = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
A_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
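# A hedged sketch (not part of this package): the deferred-import idea behind
# _LazyModule, reduced to PEP 562's module-level __getattr__. The table below is
# a toy stand-in; _LazyModule itself swaps out the whole module object instead.
_lazy_table = {'tokenization_lxmert': ['LxmertTokenizer']}

def __getattr__(name):
    import importlib

    for submodule, names in _lazy_table.items():
        if name in names:
            # Import the submodule only on first attribute access.
            return getattr(importlib.import_module(f'.{submodule}', __name__), name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')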
| 696 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(_lowerCamelCase ) * abs(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
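    # A hedged usage sketch (not in the original): 0.5 * m * |v| * |v| keeps the
    # energy positive regardless of the velocity's sign.
    print(lowerCamelCase_(10, 10))  # 500.0 J
    print(lowerCamelCase_(10, -10))  # 500.0 J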
| 696 | 1 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
    if not isinstance(_lowerCamelCase , _lowerCamelCase ) or length <= 0:
        raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 696 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : int = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
lowerCamelCase__ : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
| 696 | 1 |
"""simple docstring"""
from __future__ import annotations
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = text, pattern
lowerCamelCase__ , lowerCamelCase__ : List[Any] = len(lowerCamelCase_ ), len(lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
for i in range(self.patLen - 1, -1, -1 ):
if char == self.pattern[i]:
return i
return -1
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
for i in range(self.patLen - 1, -1, -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = []
for i in range(self.textLen - self.patLen + 1 ):
lowerCamelCase__ : Tuple = self.mismatch_in_text(lowerCamelCase_ )
if mismatch_index == -1:
positions.append(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = self.match_in_pattern(self.text[mismatch_index] )
lowerCamelCase__ : List[Any] = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
A_ : Dict = "ABAABA"
A_ : Any = "AB"
A_ : Optional[int] = BoyerMooreSearch(text, pattern)
A_ : Union[str, Any] = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
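# A hedged sketch (not in the original class): the classic way the bad-character
# table drives Boyer-Moore: jumping ahead by the computed shift instead of
# probing every alignment the way bad_character_heuristic() does above.
def _bm_search(text: str, pattern: str) -> list[int]:
    last = {ch: i for i, ch in enumerate(pattern)}  # rightmost index of each pattern char
    n, m = len(text), len(pattern)
    hits, s = [], 0
    while s <= n - m:
        j = m - 1
        while j >= 0 and pattern[j] == text[s + j]:
            j -= 1
        if j < 0:
            hits.append(s)
            s += 1  # a full good-suffix rule would permit a larger jump here
        else:
            s += max(1, j - last.get(text[s + j], -1))
    return hits
# _bm_search("ABAABA", "AB") -> [0, 3], matching the positions printed above.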
| 696 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Dict = value_function
lowerCamelCase__ : int = unet
lowerCamelCase__ : Union[str, Any] = scheduler
lowerCamelCase__ : int = env
lowerCamelCase__ : List[Any] = env.get_dataset()
lowerCamelCase__ : Dict = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[int] = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Tuple = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[Any] = env.observation_space.shape[0]
lowerCamelCase__ : List[str] = env.action_space.shape[0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if type(lowerCamelCase_ ) is dict:
return {k: self.to_torch(lowerCamelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase_, device=self.unet.device )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Optional[Any] = val.clone()
return x_in
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = x.shape[0]
lowerCamelCase__ : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : Dict = torch.full((batch_size,), lowerCamelCase_, device=self.unet.device, dtype=torch.long )
for _ in range(lowerCamelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : str = self.value_function(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample
lowerCamelCase__ : Union[str, Any] = torch.autograd.grad([y.sum()], [x] )[0]
lowerCamelCase__ : Optional[int] = self.scheduler._get_variance(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : Tuple = model_std * grad
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = x.detach()
lowerCamelCase__ : Dict = x + scale * grad
lowerCamelCase__ : Optional[int] = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : Tuple = self.unet(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : Optional[Any] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, predict_epsilon=lowerCamelCase_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Any = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
return x, y
def __call__(self, lowerCamelCase_, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.normalize(lowerCamelCase_, 'observations' )
lowerCamelCase__ : List[str] = obs[None].repeat(lowerCamelCase_, axis=0 )
lowerCamelCase__ : str = {0: self.to_torch(lowerCamelCase_ )}
lowerCamelCase__ : Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : List[Any] = randn_tensor(lowerCamelCase_, device=self.unet.device )
lowerCamelCase__ : int = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
# run the diffusion process
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.run_diffusion(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# sort output trajectories by value
lowerCamelCase__ : Union[str, Any] = y.argsort(0, descending=lowerCamelCase_ ).squeeze()
lowerCamelCase__ : List[str] = x[sorted_idx]
lowerCamelCase__ : Optional[Any] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : Union[str, Any] = actions.detach().cpu().numpy()
lowerCamelCase__ : Union[str, Any] = self.de_normalize(lowerCamelCase_, key='actions' )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : Optional[Any] = np.random.randint(0, lowerCamelCase_ )
lowerCamelCase__ : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
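# A hedged sketch (not part of the pipeline): the core move inside run_diffusion
# above, nudging the sample along the gradient of a value model before each
# denoising step. `value_fn` is a hypothetical stand-in for the value function;
# the real pipeline additionally scales the gradient by the posterior std.
def _value_guidance_step(x, t, value_fn, scale=0.1):
    with torch.enable_grad():
        x = x.detach().requires_grad_()
        # Value models here expect channels-first input: (batch, dim, horizon).
        y = value_fn(x.permute(0, 2, 1), t).sample
        grad = torch.autograd.grad([y.sum()], [x])[0]
    return (x + scale * grad).detach()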
| 696 | 1 |
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = 3
lowerCamelCase__ : int = 2_5_0
lowerCamelCase__ : Dict = ids_tensor((batch_size, length), lowerCamelCase_ )
lowerCamelCase__ : Any = torch.ones((batch_size, length), device=lowerCamelCase_, dtype=torch.float ) / length
return input_ids, scores
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self._get_tensors(5 )
lowerCamelCase__ : str = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=1_0 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self._get_tensors(9 )
self.assertFalse(criteria(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ , lowerCamelCase__ : Dict = self._get_tensors(1_0 )
self.assertTrue(criteria(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = MaxLengthCriteria(max_length=1_0 )
lowerCamelCase__ , lowerCamelCase__ : int = self._get_tensors(5 )
self.assertFalse(criteria(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ , lowerCamelCase__ : int = self._get_tensors(9 )
self.assertFalse(criteria(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self._get_tensors(1_0 )
self.assertTrue(criteria(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = MaxNewTokensCriteria(start_length=5, max_new_tokens=5 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self._get_tensors(5 )
self.assertFalse(criteria(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self._get_tensors(9 )
self.assertFalse(criteria(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ , lowerCamelCase__ : int = self._get_tensors(1_0 )
self.assertTrue(criteria(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : Any = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length, 1_0 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Dict = self._get_tensors(5 )
lowerCamelCase__ : Tuple = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCamelCase_, lowerCamelCase_ ) )
lowerCamelCase__ : Tuple = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ), 1_0 )
with self.assertWarns(lowerCamelCase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ), 1_1 )
lowerCamelCase__ : Union[str, Any] = validate_stopping_criteria(StoppingCriteriaList(), 1_1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
| 696 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = analyze_text(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = list(' ' + ascii_lowercase )
    # total count of single characters observed.
lowerCamelCase__ : List[Any] = sum(single_char_strings.values() )
# one length string
lowerCamelCase__ : str = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
lowerCamelCase__ : Tuple = single_char_strings[ch]
lowerCamelCase__ : Union[str, Any] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowerCamelCase ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
lowerCamelCase__ : Dict = sum(two_char_strings.values() )
lowerCamelCase__ : str = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            lowerCamelCase__ : int = cha + chb
if sequence in two_char_strings:
lowerCamelCase__ : int = two_char_strings[sequence]
lowerCamelCase__ : Tuple = int(_lowerCamelCase ) / all_sum
my_sec_sum += prob * math.loga(_lowerCamelCase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = Counter() # type: ignore
lowerCamelCase__ : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def lowerCamelCase_ ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
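# A hedged sketch (not in the original): the same first-order Shannon entropy,
# computed directly with collections.Counter on a short string.
def _shannon_entropy(sample: str) -> float:
    import math
    from collections import Counter

    counts = Counter(sample)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())
# _shannon_entropy('aabb') == 1.0  (two equally likely symbols carry one bit)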
| 696 | 1 |
"""simple docstring"""
from typing import Any
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
_validation(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
    # Create the data structures and fill in the initial step
lowerCamelCase__ : dict = {}
lowerCamelCase__ : dict = {}
for state in states_space:
lowerCamelCase__ : List[Any] = observations_space[0]
lowerCamelCase__ : Optional[int] = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCamelCase__ : str = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(_lowerCamelCase ) ):
lowerCamelCase__ : Any = observations_space[o]
lowerCamelCase__ : Any = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCamelCase__ : Union[str, Any] = ''
lowerCamelCase__ : str = -1
for k_state in states_space:
lowerCamelCase__ : Tuple = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCamelCase__ : int = probability
lowerCamelCase__ : List[str] = k_state
# Update probabilities and pointers dicts
lowerCamelCase__ : List[Any] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCamelCase__ : Any = arg_max
# The final observation
lowerCamelCase__ : List[str] = observations_space[len(_lowerCamelCase ) - 1]
# argmax for given final observation
lowerCamelCase__ : Tuple = ''
lowerCamelCase__ : Optional[int] = -1
for k_state in states_space:
lowerCamelCase__ : Optional[int] = probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCamelCase__ : Optional[Any] = probability
lowerCamelCase__ : str = k_state
lowerCamelCase__ : str = arg_max
# Process pointers backwards
lowerCamelCase__ : Optional[int] = last_state
lowerCamelCase__ : str = []
for o in range(len(_lowerCamelCase ) - 1 , -1 , -1 ):
result.append(_lowerCamelCase )
lowerCamelCase__ : List[str] = pointers[previous, observations_space[o]]
result.reverse()
return result
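# Minimal usage sketch (added for illustration; the toy HMM and the function
# name `viterbi`, the presumed pre-obfuscation name, are assumptions and not
# part of the original file). Expected result: ['Healthy', 'Healthy', 'Fever'].
def _viterbi_example():
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    initial = {"Healthy": 0.6, "Fever": 0.4}
    transition = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emission = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    return viterbi(observations, states, initial, transition, emission)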
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
_validate_not_empty(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
_validate_lists(_lowerCamelCase , _lowerCamelCase )
_validate_dicts(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('There\'s an empty parameter' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
_validate_list(_lowerCamelCase , 'observations_space' )
_validate_list(_lowerCamelCase , 'states_space' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if not isinstance(_object , _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = f'''{var_name} must be a list'''
raise ValueError(_lowerCamelCase )
else:
for x in _object:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : int = f'''{var_name} must be a list of strings'''
raise ValueError(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
_validate_dict(_lowerCamelCase , 'initial_probabilities' , _lowerCamelCase )
_validate_nested_dict(_lowerCamelCase , 'transition_probabilities' )
_validate_nested_dict(_lowerCamelCase , 'emission_probabilities' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
_validate_dict(_object , _lowerCamelCase , _lowerCamelCase )
for x in _object.values():
_validate_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ):
if not isinstance(_object , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = f'''{var_name} must be a dict'''
raise ValueError(_lowerCamelCase )
if not all(isinstance(_lowerCamelCase , _lowerCamelCase ) for x in _object ):
lowerCamelCase__ : Optional[Any] = f'''{var_name} all keys must be strings'''
raise ValueError(_lowerCamelCase )
if not all(isinstance(_lowerCamelCase , _lowerCamelCase ) for x in _object.values() ):
lowerCamelCase__ : Dict = 'nested dictionary ' if nested else ''
lowerCamelCase__ : Dict = f'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(_lowerCamelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 696 |
"""simple docstring"""
import os
def lowerCamelCase_ ( ):
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
lowerCamelCase__ : Union[str, Any] = str(file.readlines()[0] )
lowerCamelCase__ : int = names.replace('"' , '' ).split(',' )
names.sort()
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : str = 0
for i, name in enumerate(_lowerCamelCase ):
for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
lowerCamelCase__ : Dict = 0
return total_score
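# Worked example (from the Project Euler problem statement, for illustration):
# "COLIN" scores 3 + 15 + 12 + 9 + 14 = 53; as the 938th name in the sorted
# list it contributes 938 * 53 = 49714 to the total.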
if __name__ == "__main__":
print(solution())
| 696 | 1 |
"""simple docstring"""
import math
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = len(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = int(math.floor(math.sqrt(_lowerCamelCase ) ) )
lowerCamelCase__ : Tuple = 0
while arr[min(_lowerCamelCase , _lowerCamelCase ) - 1] < x:
lowerCamelCase__ : Union[str, Any] = step
step += int(math.floor(math.sqrt(_lowerCamelCase ) ) )
if prev >= n:
return -1
while arr[prev] < x:
lowerCamelCase__ : int = prev + 1
if prev == min(_lowerCamelCase , _lowerCamelCase ):
return -1
if arr[prev] == x:
return prev
return -1
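# Illustrative note (added; not in the original): jump search probes blocks of
# size sqrt(n) and then scans linearly within a single block, so a sorted array
# is searched in O(sqrt(n)) comparisons. For example, on [0, 1, 3, 5, 8, 13]
# searching for 5 returns index 3, and searching for the missing value 4
# returns -1.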
if __name__ == "__main__":
A_ : Optional[int] = input("Enter numbers separated by a comma:\n").strip()
A_ : Dict = [int(item) for item in user_input.split(",")]
A_ : Tuple = int(input("Enter the number to be searched:\n"))
A_ : List[str] = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(f"Number {x} is at index {res}")
| 696 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = 'Speech2TextFeatureExtractor'
lowerCamelCase__ : Dict = 'Speech2TextTokenizer'
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
def __call__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase_, **lowerCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowerCamelCase__ : Optional[int] = kwargs.pop('raw_speech' )
else:
lowerCamelCase__ : int = kwargs.pop('audio', lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = kwargs.pop('sampling_rate', lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = kwargs.pop('text', lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
lowerCamelCase__ : List[str] = args[0]
lowerCamelCase__ : Any = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowerCamelCase__ : Union[str, Any] = self.feature_extractor(lowerCamelCase_, *lowerCamelCase_, sampling_rate=lowerCamelCase_, **lowerCamelCase_ )
if text is not None:
lowerCamelCase__ : List[Any] = self.tokenizer(lowerCamelCase_, **lowerCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ : Tuple = encodings['input_ids']
return inputs
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@contextmanager
def a__ (self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
lowerCamelCase__ : int = True
lowerCamelCase__ : List[Any] = self.tokenizer
yield
lowerCamelCase__ : Optional[int] = self.feature_extractor
lowerCamelCase__ : List[Any] = False
| 696 | 1 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def lowerCamelCase_ ( _lowerCamelCase ):
return np.dot(_lowerCamelCase , _lowerCamelCase )
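# For example, norm_squared(np.array([3, 4])) == 25, the squared Euclidean norm
# (note added for illustration; the class below refers to this helper as `norm_squared`).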
class a_ :
'''simple docstring'''
def __init__(self, *,
lowerCamelCase_ = np.inf, lowerCamelCase_ = "linear", lowerCamelCase_ = 0.0, ):
'''simple docstring'''
lowerCamelCase__ : Any = regularization
lowerCamelCase__ : Optional[int] = gamma
if kernel == "linear":
lowerCamelCase__ : List[str] = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma' )
if not isinstance(self.gamma, (float, int) ):
raise ValueError('gamma must be float or int' )
if not self.gamma > 0:
raise ValueError('gamma must be > 0' )
lowerCamelCase__ : List[str] = self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
lowerCamelCase__ : List[Any] = f'''Unknown kernel: {kernel}'''
raise ValueError(lowerCamelCase_ )
    def a__ (self, vectora, vectorb ):
        '''simple docstring'''
        return np.dot(vectora, vectorb )
    def a__ (self, vectora, vectorb ):
        '''simple docstring'''
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb )) )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = observations
lowerCamelCase__ : Union[str, Any] = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((lowerCamelCase__) , ) : List[Any] = np.shape(lowerCamelCase_ )
def to_minimize(lowerCamelCase_ ) -> float:
lowerCamelCase__ : Union[str, Any] = 0
((lowerCamelCase__) , ) : Tuple = np.shape(lowerCamelCase_ )
for i in range(lowerCamelCase_ ):
for j in range(lowerCamelCase_ ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i], observations[j] )
)
return 1 / 2 * s - sum(lowerCamelCase_ )
lowerCamelCase__ : Dict = LinearConstraint(lowerCamelCase_, 0, 0 )
lowerCamelCase__ : int = Bounds(0, self.regularization )
lowerCamelCase__ : int = minimize(
lowerCamelCase_, np.ones(lowerCamelCase_ ), bounds=lowerCamelCase_, constraints=[ly_contraint] ).x
lowerCamelCase__ : Union[str, Any] = l_star
# calculating mean offset of separation plane to points
lowerCamelCase__ : int = 0
for i in range(lowerCamelCase_ ):
for j in range(lowerCamelCase_ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i], observations[j] )
lowerCamelCase__ : Optional[Any] = s / n
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n], lowerCamelCase_ )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
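# Minimal usage sketch (added for illustration; it assumes the pre-obfuscation
# API names SVC, fit and predict rather than the placeholder names above):
#
#     xs = np.array([[0.0, 1.0], [0.0, 2.0], [1.0, 1.0], [1.0, 2.0]])
#     ys = np.array([1, 1, -1, -1])
#     svc = SVC(kernel="linear")
#     svc.fit(xs, ys)
#     svc.predict(np.array([0.0, 1.5]))  # expected: 1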
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[Any] = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Optional[int] = embedding_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        (
            lowerCamelCase__ ,
            lowerCamelCase__ ,
            lowerCamelCase__ ,
            lowerCamelCase__ ,
            lowerCamelCase__ ,
            lowerCamelCase__ ,
            lowerCamelCase__ ,
        ) : List[str] = config_and_inputs
lowerCamelCase__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase_ )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileBertModelTester(self )
lowerCamelCase__ : List[str] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
return torch.tensor(
_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase , )
A_ : Tuple = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[int] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
], device=lowerCamelCase_, )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowerCamelCase__ : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase__ : Any = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
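# Illustration of the ratio-based check above (added for clarity; not part of
# the test): with activations near 1e8 an absolute tolerance is meaningless,
# while the expected/actual ratio stays within 1 +/- TOLERANCE.
#
#     expected = torch.tensor([1.0e8])
#     result = expected * (1 + 1e-7)            # 0.00001 % relative error
#     torch.abs(expected - result) < TOLERANCE  # tensor([False]): the gap is ~10.0
#     (expected / result > 1 - TOLERANCE) & (expected / result < 1 + TOLERANCE)  # tensor([True])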
| 696 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = (PNDMScheduler,)
lowerCamelCase__ : List[Any] = (('num_inference_steps', 50),)
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**lowerCamelCase_ )
return config
def a__ (self, lowerCamelCase_=0, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = dict(self.forward_default_kwargs )
lowerCamelCase__ : List[Any] = kwargs.pop('num_inference_steps', lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = self.dummy_sample
lowerCamelCase__ : int = 0.1 * sample
lowerCamelCase__ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ : Tuple = self.get_scheduler_config(**lowerCamelCase_ )
lowerCamelCase__ : List[str] = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
lowerCamelCase__ : Dict = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
lowerCamelCase__ : Dict = scheduler_class.from_pretrained(lowerCamelCase_ )
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
lowerCamelCase__ : Tuple = dummy_past_residuals[:]
lowerCamelCase__ : Tuple = scheduler.step_prk(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ).prev_sample
lowerCamelCase__ : List[str] = new_scheduler.step_prk(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCamelCase__ : Union[str, Any] = scheduler.step_plms(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Optional[Any] = new_scheduler.step_plms(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a__ (self ):
'''simple docstring'''
pass
def a__ (self, lowerCamelCase_=0, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = dict(self.forward_default_kwargs )
lowerCamelCase__ : Union[str, Any] = kwargs.pop('num_inference_steps', lowerCamelCase_ )
lowerCamelCase__ : Tuple = self.dummy_sample
lowerCamelCase__ : Optional[Any] = 0.1 * sample
lowerCamelCase__ : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ : List[str] = self.get_scheduler_config()
lowerCamelCase__ : Any = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCamelCase__ : Union[str, Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
lowerCamelCase__ : Tuple = scheduler_class.from_pretrained(lowerCamelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residual (must be after setting timesteps)
lowerCamelCase__ : List[Any] = dummy_past_residuals[:]
lowerCamelCase__ : Tuple = scheduler.step_prk(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Tuple = new_scheduler.step_prk(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCamelCase__ : List[str] = scheduler.step_plms(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ).prev_sample
lowerCamelCase__ : List[Any] = new_scheduler.step_plms(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.scheduler_classes[0]
lowerCamelCase__ : List[str] = self.get_scheduler_config(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = scheduler_class(**lowerCamelCase_ )
lowerCamelCase__ : List[Any] = 1_0
lowerCamelCase__ : Dict = self.dummy_model()
lowerCamelCase__ : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
lowerCamelCase__ : List[str] = model(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = scheduler.step_prk(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[Any] = scheduler.step_plms(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
return sample
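    # Note (added for clarity): PNDM runs a few Runge-Kutta warm-up steps
    # (`prk_timesteps`) before switching to the pseudo linear multistep phase
    # (`plms_timesteps`), which is why the helper above iterates two schedules.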
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = dict(self.forward_default_kwargs )
lowerCamelCase__ : List[str] = kwargs.pop('num_inference_steps', lowerCamelCase_ )
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ : Optional[int] = self.get_scheduler_config()
lowerCamelCase__ : Dict = scheduler_class(**lowerCamelCase_ )
lowerCamelCase__ : List[str] = self.dummy_sample
lowerCamelCase__ : Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase_, 'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase_ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase_, 'set_timesteps' ):
lowerCamelCase__ : Optional[int] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCamelCase__ : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowerCamelCase__ : str = dummy_past_residuals[:]
lowerCamelCase__ : str = scheduler.step_prk(lowerCamelCase_, 0, lowerCamelCase_, **lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Optional[int] = scheduler.step_prk(lowerCamelCase_, 1, lowerCamelCase_, **lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
lowerCamelCase__ : Dict = scheduler.step_plms(lowerCamelCase_, 0, lowerCamelCase_, **lowerCamelCase_ ).prev_sample
lowerCamelCase__ : int = scheduler.step_plms(lowerCamelCase_, 1, lowerCamelCase_, **lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def a__ (self ):
'''simple docstring'''
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = self.scheduler_classes[0]
lowerCamelCase__ : str = self.get_scheduler_config(steps_offset=1 )
lowerCamelCase__ : Tuple = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps, torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ), )
def a__ (self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001], [0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase_, beta_end=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 1_0], [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = 2_7
for scheduler_class in self.scheduler_classes:
lowerCamelCase__ : int = self.dummy_sample
lowerCamelCase__ : str = 0.1 * sample
lowerCamelCase__ : int = self.get_scheduler_config()
lowerCamelCase__ : Any = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
lowerCamelCase__ : Optional[int] = scheduler.step_prk(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
lowerCamelCase__ : int = self.scheduler_classes[0]
lowerCamelCase__ : Optional[int] = self.get_scheduler_config()
lowerCamelCase__ : List[Any] = scheduler_class(**lowerCamelCase_ )
scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample ).prev_sample
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.full_loop()
lowerCamelCase__ : List[str] = torch.sum(torch.abs(lowerCamelCase_ ) )
lowerCamelCase__ : Any = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 198.1_318 ) < 1e-2
assert abs(result_mean.item() - 0.2_580 ) < 1e-3
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.full_loop(prediction_type='v_prediction' )
lowerCamelCase__ : Dict = torch.sum(torch.abs(lowerCamelCase_ ) )
lowerCamelCase__ : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 67.3_986 ) < 1e-2
assert abs(result_mean.item() - 0.0_878 ) < 1e-3
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.full_loop(set_alpha_to_one=lowerCamelCase_, beta_start=0.01 )
lowerCamelCase__ : List[str] = torch.sum(torch.abs(lowerCamelCase_ ) )
lowerCamelCase__ : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 230.0_399 ) < 1e-2
assert abs(result_mean.item() - 0.2_995 ) < 1e-3
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.full_loop(set_alpha_to_one=lowerCamelCase_, beta_start=0.01 )
lowerCamelCase__ : str = torch.sum(torch.abs(lowerCamelCase_ ) )
lowerCamelCase__ : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 186.9_482 ) < 1e-2
assert abs(result_mean.item() - 0.2_434 ) < 1e-3
| 696 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A_ : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=1 ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer
lowerCamelCase__ : Optional[Any] = dataset
lowerCamelCase__ : int = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
lowerCamelCase__ : Any = n_copies
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = start_length
lowerCamelCase__ : List[str] = eof_strings
lowerCamelCase__ : List[str] = tokenizer
def __call__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
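# Note (added for clarity): generation halts only once *every* sequence in the
# batch has produced one of EOF_STRINGS beyond the prompt, hence the all(...)
# in the stopping criterion above.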
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = re.split('(%s)' % '|'.join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ):
lowerCamelCase__ : List[str] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
lowerCamelCase__ : str = batch['ids'].shape[-1]
lowerCamelCase__ : int = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
lowerCamelCase__ : Optional[Any] = batch['task_id'].repeat(_lowerCamelCase )
lowerCamelCase__ : List[Any] = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ : List[Any] = generated_tokens.cpu().numpy()
lowerCamelCase__ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
lowerCamelCase__ : str = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ : Optional[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def lowerCamelCase_ ( ):
# Setup configuration
lowerCamelCase__ : int = HfArgumentParser(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ : Tuple = 'false'
if args.num_workers is None:
lowerCamelCase__ : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ : Optional[int] = tokenizer.eos_token
lowerCamelCase__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ : Any = load_dataset('openai_humaneval' )
lowerCamelCase__ : Optional[int] = load_metric('code_eval' )
lowerCamelCase__ : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ : Optional[int] = args.n_samples // args.batch_size
lowerCamelCase__ : Tuple = TokenizedDataset(_lowerCamelCase , human_eval['test'] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ : Union[str, Any] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ : List[Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ : str = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
lowerCamelCase__ : List[str] = []
for task in tqdm(range(_lowerCamelCase ) ):
lowerCamelCase__ : int = human_eval['test'][task]['test']
lowerCamelCase__ : Union[str, Any] = f'''check({human_eval['test'][task]['entry_point']})'''
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ : Any = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 696 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
| 696 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : str = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['speech']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['speech'] )
| 696 | 1 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
A_ : Any = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
A_ : Union[str, Any] = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but also returning the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
A_ : Optional[int] = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ), reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'], )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
if return_pvalue:
lowerCamelCase__ : List[Any] = pearsonr(lowerCamelCase_, lowerCamelCase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCamelCase_, lowerCamelCase_ )[0] )}
| 696 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = 0
while number > 0:
lowerCamelCase__ : List[str] = number % 10
sum_of_digits += last_digit
lowerCamelCase__ : str = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase_ ( _lowerCamelCase = 100 ):
lowerCamelCase__ : Union[str, Any] = factorial(_lowerCamelCase )
lowerCamelCase__ : List[Any] = split_and_add(_lowerCamelCase )
return result
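# Worked example (added for illustration): 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) returns 27; the default
# solution(100) sums the digits of 100! as asked by Project Euler problem 20.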
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 696 | 1 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
A_ : Optional[int] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
A_ : int = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
A_ : str = "|".join(sys.argv[1:])
A_ : Optional[int] = re.compile(rf"^({joined_dirs}).*?\.py$")
A_ : Dict = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 696 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ : Dict = "pt"
elif is_tf_available():
A_ : Union[str, Any] = "tf"
else:
A_ : List[str] = "jax"
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = PerceiverTokenizer
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : int = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def a__ (self ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_=False, lowerCamelCase_=2_0, lowerCamelCase_=5 ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = []
for i in range(len(lowerCamelCase_ ) ):
try:
lowerCamelCase__ : Any = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Any = list(filter(lambda lowerCamelCase_ : re.match(r'^[ a-zA-Z]+$', t[1] ), lowerCamelCase_ ) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda lowerCamelCase_ : [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCamelCase_ ), lowerCamelCase_ ) )
if max_length is not None and len(lowerCamelCase_ ) > max_length:
lowerCamelCase__ : int = toks[:max_length]
if min_length is not None and len(lowerCamelCase_ ) < min_length and len(lowerCamelCase_ ) > 0:
while len(lowerCamelCase_ ) < min_length:
lowerCamelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : int = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Optional[int] = tokenizer.decode(lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_ )
if " " not in output_txt and len(lowerCamelCase_ ) > 1:
lowerCamelCase__ : List[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCamelCase_ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = ' ' + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
return output_txt, output_ids
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = 'Unicode €.'
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_ )
lowerCamelCase__ : Dict = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : int = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]Unicode €.[SEP]' )
lowerCamelCase__ : List[str] = tokenizer('e è é ê ë' )
lowerCamelCase__ : Dict = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['input_ids'], lowerCamelCase_ )
# decoding
lowerCamelCase__ : Any = tokenizer.decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ), '[CLS]e è é ê ë[SEP]' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCamelCase__ : List[Any] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCamelCase__ : Optional[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
if FRAMEWORK != "jax":
lowerCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual((2, 3_8), batch.input_ids.shape )
self.assertEqual((2, 3_8), batch.attention_mask.shape )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.perceiver_tokenizer
lowerCamelCase__ : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCamelCase__ : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCamelCase_ )
self.assertIn('attention_mask', lowerCamelCase_ )
self.assertNotIn('decoder_input_ids', lowerCamelCase_ )
self.assertNotIn('decoder_attention_mask', lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.perceiver_tokenizer
lowerCamelCase__ : int = [
'Summary of the text.',
'Another summary.',
]
lowerCamelCase__ : str = tokenizer(
text_target=lowerCamelCase_, max_length=3_2, padding='max_length', truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertEqual(3_2, targets['input_ids'].shape[1] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 4_2 )
# Now let's start the test
lowerCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : str = ' He is very happy, UNwant\u00E9d,running'
lowerCamelCase__ : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCamelCase__ : List[str] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 4_2 )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_, model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length, 4_3 )
shutil.rmtree(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), encoding='utf-8' ) as json_file:
lowerCamelCase__ : List[str] = json.load(lowerCamelCase_ )
lowerCamelCase__ : Any = [f'''<extra_id_{i}>''' for i in range(1_2_5 )]
lowerCamelCase__ : Optional[int] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCamelCase__ : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCamelCase_, 'special_tokens_map.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_, 'tokenizer_config.json' ), 'w', encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase_, lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
lowerCamelCase_, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCamelCase_ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
lowerCamelCase_, additional_special_tokens=lowerCamelCase_, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ), '�' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_tokenizers(fast=lowerCamelCase_, do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Tuple = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCamelCase__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
| 696 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Any = logging.get_logger(__name__)
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Any = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase__ : int = 192
lowerCamelCase__ : Tuple = 768
lowerCamelCase__ : Any = 12
lowerCamelCase__ : Tuple = 3
lowerCamelCase__ : int = [800, 1333]
lowerCamelCase__ : Dict = False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__ : Union[str, Any] = 330
lowerCamelCase__ : int = 14
lowerCamelCase__ : List[str] = 6
lowerCamelCase__ : List[Any] = 1320
elif "yolos_s" in yolos_name:
lowerCamelCase__ : Any = 384
lowerCamelCase__ : List[Any] = 1536
lowerCamelCase__ : Any = 12
lowerCamelCase__ : Optional[int] = 6
elif "yolos_b" in yolos_name:
lowerCamelCase__ : Optional[Any] = [800, 1344]
lowerCamelCase__ : Any = 91
lowerCamelCase__ : Dict = 'huggingface/label-files'
lowerCamelCase__ : Any = 'coco-detection-id2label.json'
lowerCamelCase__ : Union[str, Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='dataset' ) , 'r' ) )
lowerCamelCase__ : Optional[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : str = idalabel
lowerCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ : List[str] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase__ : Optional[Any] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : int = in_proj_weight[: config.hidden_size, :]
lowerCamelCase__ : str = in_proj_bias[: config.hidden_size]
lowerCamelCase__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ : Optional[Any] = in_proj_weight[-config.hidden_size :, :]
lowerCamelCase__ : int = in_proj_bias[-config.hidden_size :]
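# Illustrative shape check (a sketch, not part of the original script): the fused timm
# qkv weight has shape (3 * hidden_size, hidden_size), and the three equal row slices
# taken above become the separate query/key/value matrices, e.g.:
#
#     import torch
#     hidden_size = 4  # toy value, an assumption for illustration
#     qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
#     q, k, v = qkv[:hidden_size], qkv[hidden_size : 2 * hidden_size], qkv[-hidden_size:]
#     assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)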
def lowerCamelCase_ ( _lowerCamelCase ):
if "backbone" in name:
lowerCamelCase__ : str = name.replace('backbone' , 'vit' )
if "cls_token" in name:
lowerCamelCase__ : Union[str, Any] = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
lowerCamelCase__ : str = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
lowerCamelCase__ : str = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
lowerCamelCase__ : Optional[Any] = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
lowerCamelCase__ : List[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
lowerCamelCase__ : List[str] = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
lowerCamelCase__ : Any = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowerCamelCase__ : Optional[Any] = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowerCamelCase__ : List[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowerCamelCase__ : Any = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowerCamelCase__ : Tuple = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCamelCase__ : Dict = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
lowerCamelCase__ : List[str] = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
lowerCamelCase__ : Tuple = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
lowerCamelCase__ : Dict = name.replace('vit.norm' , 'vit.layernorm' )
return name
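# Illustrative example of the renaming helper above (the key is hypothetical): the
# rules map 'backbone.blocks.0.attn.proj.weight' step by step to
# 'vit.encoder.layer.0.attention.output.dense.weight'.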
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
for key in orig_state_dict.copy().keys():
lowerCamelCase__ : str = orig_state_dict.pop(_lowerCamelCase )
if "qkv" in key:
lowerCamelCase__ : List[Any] = key.split('.' )
lowerCamelCase__ : int = int(key_split[2] )
lowerCamelCase__ : List[Any] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase__ : str = val[:dim, :]
lowerCamelCase__ : str = val[
dim : dim * 2, :
]
lowerCamelCase__ : List[Any] = val[-dim:, :]
else:
lowerCamelCase__ : List[str] = val[:dim]
lowerCamelCase__ : str = val[dim : dim * 2]
lowerCamelCase__ : Dict = val[-dim:]
else:
lowerCamelCase__ : int = val
return orig_state_dict
def lowerCamelCase_ ( ):
lowerCamelCase__ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase__ : Any = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ):
lowerCamelCase__ : Optional[int] = get_yolos_config(_lowerCamelCase )
# load original state_dict
lowerCamelCase__ : Any = torch.load(_lowerCamelCase , map_location='cpu' )['model']
# load 🤗 model
lowerCamelCase__ : List[str] = YolosForObjectDetection(_lowerCamelCase )
model.eval()
lowerCamelCase__ : List[Any] = convert_state_dict(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase__ : Union[str, Any] = 800 if yolos_name != 'yolos_ti' else 512
lowerCamelCase__ : int = YolosImageProcessor(format='coco_detection' , size=_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = image_processor(images=prepare_img() , return_tensors='pt' )
lowerCamelCase__ : Tuple = model(**_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = outputs.logits, outputs.pred_boxes
lowerCamelCase__ , lowerCamelCase__ : Dict = None, None
if yolos_name == "yolos_ti":
lowerCamelCase__ : str = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
lowerCamelCase__ : str = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
lowerCamelCase__ : Any = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase__ : Any = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__ : List[str] = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
lowerCamelCase__ : Optional[int] = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
lowerCamelCase__ : List[str] = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
lowerCamelCase__ : Dict = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
lowerCamelCase__ : Dict = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
lowerCamelCase__ : List[str] = model_mapping[yolos_name]
image_processor.push_to_hub(_lowerCamelCase , organization='hustvl' )
model.push_to_hub(_lowerCamelCase , organization='hustvl' )
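# Illustrative invocation of this conversion script (the filename is an assumption,
# not given in this file):
#
#     python convert_yolos_to_pytorch.py \
#         --yolos_name yolos_s_200_pre \
#         --checkpoint_path /path/to/yolos_s_200_pre.pth \
#         --pytorch_dump_folder_path ./yolos-small \
#         --push_to_hub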
if __name__ == "__main__":
A_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
A_ : Dict = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 696 |
"""simple docstring"""
from math import pi, sqrt, tan
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
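# Worked example for the cone formula above: radius = 10 and height = 20 give a slant
# height of sqrt(10**2 + 20**2) ~= 22.3607, so the surface area is
# pi * 10 * (10 + 22.3607) ~= 1016.64.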
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCamelCase__ : Any = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_lowerCamelCase , 2 ) * torus_radius * tube_radius
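# Worked example for the torus formula above (A = 4 * pi**2 * R * r): torus_radius
# R = 20 and tube_radius r = 10 give 800 * pi**2 ~= 7895.68.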
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def lowerCamelCase_ ( _lowerCamelCase ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCamelCase__ : Dict = (sidea + sidea + sidea) / 2
lowerCamelCase__ : str = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
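# Worked example of Heron's formula as implemented above: sides 5, 12, 13 give a
# semi-perimeter of (5 + 12 + 13) / 2 = 15 and an area of
# sqrt(15 * 10 * 3 * 2) = sqrt(900) = 30.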
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def lowerCamelCase_ ( _lowerCamelCase ):
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
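# Worked example for the regular-polygon formula above, (n * L**2) / (4 * tan(pi / n)):
# a regular pentagon (n = 5) with side length 10 has area
# 500 / (4 * tan(pi / 5)) ~= 172.05.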
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 696 | 1 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
A_ : Dict = NewType("DataClass", Any)
A_ : Optional[Any] = NewType("DataClassType", Any)
def lowerCamelCase_ ( _lowerCamelCase ):
if isinstance(_lowerCamelCase , _lowerCamelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Tuple = {str(_lowerCamelCase ): choice for choice in choices}
return lambda _lowerCamelCase : str_to_choice.get(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( *,
_lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = dataclasses.MISSING , _lowerCamelCase = dataclasses.MISSING , _lowerCamelCase = None , **_lowerCamelCase , ):
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
lowerCamelCase__ : Optional[Any] = {}
if aliases is not None:
lowerCamelCase__ : int = aliases
if help is not None:
lowerCamelCase__ : Dict = help
return dataclasses.field(metadata=_lowerCamelCase , default=_lowerCamelCase , default_factory=_lowerCamelCase , **_lowerCamelCase )
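# Illustrative usage of the field helper above (a sketch; in the de-obfuscated upstream
# library this helper appears to correspond to `HfArg`, and the dataclass below is
# hypothetical):
#
#     @dataclasses.dataclass
#     class ExampleArguments:
#         learning_rate: float = HfArg(default=1e-4, aliases=['--lr'], help='Peak learning rate.')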
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Iterable[DataClassType]
def __init__(self, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if "formatter_class" not in kwargs:
lowerCamelCase__ : Union[str, Any] = ArgumentDefaultsHelpFormatter
super().__init__(**lowerCamelCase_ )
if dataclasses.is_dataclass(lowerCamelCase_ ):
lowerCamelCase__ : int = [dataclass_types]
lowerCamelCase__ : Dict = list(lowerCamelCase_ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(lowerCamelCase_ )
@staticmethod
def a__ (lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = f'''--{field.name}'''
lowerCamelCase__ : Union[str, Any] = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type, lowerCamelCase_ ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
lowerCamelCase__ : Dict = kwargs.pop('aliases', [] )
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Optional[int] = [aliases]
lowerCamelCase__ : Optional[int] = getattr(field.type, '__origin__', field.type )
if origin_type is Union or (hasattr(lowerCamelCase_, 'UnionType' ) and isinstance(lowerCamelCase_, types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(lowerCamelCase_ ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
f''' Problem encountered in field \'{field.name}\'.''' )
if type(lowerCamelCase_ ) not in field.type.__args__:
# filter `str` in Union
lowerCamelCase__ : Dict = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowerCamelCase__ : Tuple = getattr(field.type, '__origin__', field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowerCamelCase__ : int = (
field.type.__args__[0] if isinstance(lowerCamelCase_, field.type.__args__[1] ) else field.type.__args__[1]
)
lowerCamelCase__ : Optional[int] = getattr(field.type, '__origin__', field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
lowerCamelCase__ : Any = {}
if origin_type is Literal or (isinstance(field.type, lowerCamelCase_ ) and issubclass(field.type, lowerCamelCase_ )):
if origin_type is Literal:
lowerCamelCase__ : Union[str, Any] = field.type.__args__
else:
lowerCamelCase__ : Optional[int] = [x.value for x in field.type]
lowerCamelCase__ : Tuple = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
lowerCamelCase__ : List[str] = field.default
else:
lowerCamelCase__ : Optional[Any] = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
lowerCamelCase__ : Dict = copy(lowerCamelCase_ )
# Hack because type=bool in argparse does not behave as we want.
lowerCamelCase__ : int = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
lowerCamelCase__ : Optional[int] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowerCamelCase__ : Dict = default
# This tells argparse we accept 0 or 1 value after --field_name
lowerCamelCase__ : List[Any] = '?'
# This is the value that will get picked if we do --field_name (without value)
lowerCamelCase__ : int = True
elif isclass(lowerCamelCase_ ) and issubclass(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Tuple = field.type.__args__[0]
lowerCamelCase__ : Optional[int] = '+'
if field.default_factory is not dataclasses.MISSING:
lowerCamelCase__ : Optional[int] = field.default_factory()
elif field.default is dataclasses.MISSING:
lowerCamelCase__ : Optional[Any] = True
else:
lowerCamelCase__ : Optional[int] = field.type
if field.default is not dataclasses.MISSING:
lowerCamelCase__ : int = field.default
elif field.default_factory is not dataclasses.MISSING:
lowerCamelCase__ : List[str] = field.default_factory()
else:
lowerCamelCase__ : List[Any] = True
parser.add_argument(lowerCamelCase_, *lowerCamelCase_, **lowerCamelCase_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowerCamelCase__ : Dict = False
parser.add_argument(f'''--no_{field.name}''', action='store_false', dest=field.name, **lowerCamelCase_ )
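    # Illustrative effect of the boolean handling above (a clarifying note; the field
    # name is hypothetical): a field `use_cache: bool = True` yields both `--use_cache`
    # (nargs='?', const=True) and a complementary `--no_use_cache` that stores False
    # into the same destination, so passing `--no_use_cache` flips the default.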
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if hasattr(lowerCamelCase_, '_argument_group_name' ):
lowerCamelCase__ : Tuple = self.add_argument_group(dtype._argument_group_name )
else:
lowerCamelCase__ : Dict = self
try:
lowerCamelCase__ : Dict[str, type] = get_type_hints(lowerCamelCase_ )
except NameError:
raise RuntimeError(
                f'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
                'removing the line `from __future__ import annotations` which opts into Postponed '
                'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(lowerCamelCase_ ):
lowerCamelCase__ : List[str] = '.'.join(map(lowerCamelCase_, sys.version_info[:3] ) )
raise RuntimeError(
                    f'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
                    'the line `from __future__ import annotations` which opts into union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(lowerCamelCase_ ):
if not field.init:
continue
lowerCamelCase__ : Optional[Any] = type_hints[field.name]
self._parse_dataclass_field(lowerCamelCase_, lowerCamelCase_ )
def a__ (self, lowerCamelCase_=None, lowerCamelCase_=False, lowerCamelCase_=True, lowerCamelCase_=None, lowerCamelCase_=None, ):
'''simple docstring'''
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowerCamelCase__ : Tuple = []
if args_filename:
args_files.append(Path(lowerCamelCase_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowerCamelCase__ : str = ArgumentParser()
args_file_parser.add_argument(lowerCamelCase_, type=lowerCamelCase_, action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
lowerCamelCase__ , lowerCamelCase__ : str = args_file_parser.parse_known_args(args=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = vars(lowerCamelCase_ ).get(args_file_flag.lstrip('-' ), lowerCamelCase_ )
if cmd_args_file_paths:
args_files.extend([Path(lowerCamelCase_ ) for p in cmd_args_file_paths] )
lowerCamelCase__ : Tuple = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowerCamelCase__ : str = file_args + args if args is not None else file_args + sys.argv[1:]
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.parse_known_args(args=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = []
for dtype in self.dataclass_types:
lowerCamelCase__ : Union[str, Any] = {f.name for f in dataclasses.fields(lowerCamelCase_ ) if f.init}
lowerCamelCase__ : Union[str, Any] = {k: v for k, v in vars(lowerCamelCase_ ).items() if k in keys}
for k in keys:
delattr(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = dtype(**lowerCamelCase_ )
outputs.append(lowerCamelCase_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(lowerCamelCase_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
return (*outputs,)
def a__ (self, lowerCamelCase_, lowerCamelCase_ = False ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = set(args.keys() )
lowerCamelCase__ : List[Any] = []
for dtype in self.dataclass_types:
lowerCamelCase__ : Any = {f.name for f in dataclasses.fields(lowerCamelCase_ ) if f.init}
lowerCamelCase__ : Optional[int] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
lowerCamelCase__ : List[Any] = dtype(**lowerCamelCase_ )
outputs.append(lowerCamelCase_ )
if not allow_extra_keys and unused_keys:
raise ValueError(f'''Some keys are not used by the HfArgumentParser: {sorted(lowerCamelCase_ )}''' )
return tuple(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = False ):
'''simple docstring'''
with open(Path(lowerCamelCase_ ), encoding='utf-8' ) as open_json_file:
lowerCamelCase__ : List[str] = json.loads(open_json_file.read() )
lowerCamelCase__ : Optional[Any] = self.parse_dict(lowerCamelCase_, allow_extra_keys=lowerCamelCase_ )
return tuple(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = False ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.parse_dict(yaml.safe_load(Path(lowerCamelCase_ ).read_text() ), allow_extra_keys=lowerCamelCase_ )
return tuple(lowerCamelCase_ )
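    # Illustrative end-to-end usage (a sketch assuming the de-obfuscated upstream API,
    # i.e. `HfArgumentParser.parse_args_into_dataclasses`; the dataclass is hypothetical):
    #
    #     @dataclasses.dataclass
    #     class TrainArgs:
    #         epochs: int = 3
    #         lr: float = 1e-4
    #
    #     parser = HfArgumentParser(TrainArgs)
    #     (train_args,) = parser.parse_args_into_dataclasses(args=['--epochs', '5'])
    #     assert train_args.epochs == 5 and train_args.lr == 1e-4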
| 696 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : str = use_input_mask
lowerCamelCase__ : Optional[Any] = use_token_type_ids
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Dict = type_vocab_size
lowerCamelCase__ : Union[str, Any] = type_sequence_label_size
lowerCamelCase__ : List[Any] = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : Union[str, Any] = num_choices
lowerCamelCase__ : List[str] = scope
lowerCamelCase__ : Dict = vocab_size - 1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Optional[Any] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ (self ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = GPTNeoXModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Optional[Any] = GPTNeoXForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = self.num_labels
lowerCamelCase__ : Optional[int] = GPTNeoXForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : str = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : List[Any] = GPTNeoXForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Tuple = model(lowerCamelCase_, attention_mask=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : List[str] = GPTNeoXForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids accordingly
lowerCamelCase__ : str = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : List[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 )
        # append the new tokens to input_ids and extend the attention mask
lowerCamelCase__ : Tuple = torch.cat([input_ids, next_tokens], dim=-1 )
lowerCamelCase__ : Tuple = torch.cat([input_mask, next_mask], dim=-1 )
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, output_hidden_states=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = output_from_no_past['hidden_states'][0]
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_, output_hidden_states=lowerCamelCase_, )['hidden_states'][0]
# select random slice
lowerCamelCase__ : Dict = ids_tensor((1,), output_from_past.shape[-1] ).item()
lowerCamelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
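    # Note on the equivalence check above (a clarifying comment, not from the original
    # file): the cached pass feeds only the 3 new tokens plus `past_key_values`, while
    # the uncached pass re-feeds the full concatenated sequence; the last 3 positions of
    # a randomly chosen hidden dimension should then agree to within atol=1e-3.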
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : Dict = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = GPTNeoXModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=6_4, num_attention_heads=8 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
lowerCamelCase__ : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def a__ (self ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[Any] = ids_tensor([1, 1_0], config.vocab_size )
lowerCamelCase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Any = GPTNeoXModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
lowerCamelCase__ : List[Any] = original_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase__ : Optional[int] = {'type': scaling_type, 'factor': 10.0}
lowerCamelCase__ : int = GPTNeoXModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
lowerCamelCase__ : Tuple = scaled_model(lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[int] = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
lowerCamelCase__ : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = tokenizer('My favorite food is', return_tensors='pt' ).to(lowerCamelCase_ )
            # The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
lowerCamelCase__ : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
lowerCamelCase__ : Dict = model.generate(**lowerCamelCase_, do_sample=lowerCamelCase_, max_new_tokens=2_0 )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase_ )[0]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
| 696 | 1 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError('only integers accepted as input' )
else:
lowerCamelCase__ : List[Any] = str(abs(_lowerCamelCase ) )
lowerCamelCase__ : Any = [list(_lowerCamelCase ) for char in range(len(_lowerCamelCase ) )]
for index in range(len(_lowerCamelCase ) ):
            num_transpositions[index].pop(index )
return max(
            int(''.join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 696 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Dict = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
A_ : List[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
A_ : Union[str, Any] = spec.loader.load_module()
A_ : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
A_ : Optional[int] = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
A_ : str = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowerCamelCase_ ( ):
lowerCamelCase__ : Dict = []
for config_class in list(CONFIG_MAPPING.values() ):
lowerCamelCase__ : Dict = False
# source code of `config_class`
lowerCamelCase__ : str = inspect.getsource(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = _re_checkpoint.findall(_lowerCamelCase )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
            lowerCamelCase__ : Any = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
lowerCamelCase__ : Any = True
break
lowerCamelCase__ : Dict = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowerCamelCase__ : Optional[Any] = '\n'.join(sorted(_lowerCamelCase ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 696 | 1 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
A_ : Dict = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
A_ : Any = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = (images / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase__ : Dict = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCamelCase__ : Tuple = numpy_to_pil(_lowerCamelCase )
return images
def lowerCamelCase_ ( _lowerCamelCase ):
if images.ndim == 3:
lowerCamelCase__ : str = images[None, ...]
lowerCamelCase__ : str = (images * 255).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowerCamelCase__ : Union[str, Any] = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
lowerCamelCase__ : str = [Image.fromarray(_lowerCamelCase ) for image in images]
return pil_images
| 696 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Tuple = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 696 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
if attention_mask is None:
lowerCamelCase__ : Union[str, Any] = tf.cast(tf.math.not_equal(_lowerCamelCase , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class a_ :
'''simple docstring'''
lowerCamelCase__ : Tuple = OPTConfig
lowerCamelCase__ : Any = {}
lowerCamelCase__ : Union[str, Any] = 'gelu'
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_=9_9, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=4, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=2_0, lowerCamelCase_=2, lowerCamelCase_=1, lowerCamelCase_=0, lowerCamelCase_=1_6, lowerCamelCase_=1_6, ):
'''simple docstring'''
lowerCamelCase__ : int = parent
lowerCamelCase__ : Tuple = batch_size
lowerCamelCase__ : Optional[int] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[int] = use_labels
lowerCamelCase__ : int = vocab_size
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : str = num_attention_heads
lowerCamelCase__ : Optional[Any] = intermediate_size
lowerCamelCase__ : Dict = hidden_act
lowerCamelCase__ : List[str] = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = eos_token_id
lowerCamelCase__ : Optional[Any] = pad_token_id
lowerCamelCase__ : str = bos_token_id
lowerCamelCase__ : int = embed_dim
lowerCamelCase__ : List[str] = word_embed_proj_dim
lowerCamelCase__ : Optional[Any] = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
lowerCamelCase__ : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
lowerCamelCase__ : Tuple = tf.concat([input_ids, eos_tensor], axis=1 )
lowerCamelCase__ : str = self.config_cls(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=lowerCamelCase_, **self.config_updates, )
lowerCamelCase__ : List[str] = prepare_opt_inputs_dict(lowerCamelCase_, lowerCamelCase_ )
return config, inputs_dict
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = TFOPTModel(config=lowerCamelCase_ )
lowerCamelCase__ : str = inputs_dict['input_ids']
lowerCamelCase__ : int = input_ids[:1, :]
lowerCamelCase__ : List[str] = inputs_dict['attention_mask'][:1, :]
lowerCamelCase__ : Dict = 1
# first forward pass
lowerCamelCase__ : List[str] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, use_cache=lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids accordingly
lowerCamelCase__ : List[str] = ids_tensor((self.batch_size, 3), config.vocab_size )
lowerCamelCase__ : Dict = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
        # append to next input_ids and attention_mask
lowerCamelCase__ : Dict = tf.concat([input_ids, next_tokens], axis=-1 )
lowerCamelCase__ : List[Any] = tf.concat([attention_mask, next_attn_mask], axis=-1 )
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, past_key_values=lowerCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
lowerCamelCase__ : Union[str, Any] = int(ids_tensor((1,), output_from_past.shape[-1] ) )
lowerCamelCase__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
lowerCamelCase__ : Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCamelCase_, lowerCamelCase_, rtol=1e-3 )
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowerCamelCase__ : Union[str, Any] = (TFOPTForCausalLM,) if is_tf_available() else ()
lowerCamelCase__ : Optional[int] = (
{'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Optional[Any] = 10
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TFOPTModelTester(self )
lowerCamelCase__ : Tuple = ConfigTester(self, config_class=lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCamelCase_, lowerCamelCase_ ):
if hasattr(lowerCamelCase_, 'weight' ):
return embedding_layer.weight
else:
                # Here we build the word embedding weights if they don't exist yet,
                # and then retry fetching the attribute once built.
model.build()
if hasattr(lowerCamelCase_, 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
lowerCamelCase__ : Optional[int] = model_class(config=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = _get_word_embedding_weight(lowerCamelCase_, model.get_input_embeddings() )
lowerCamelCase__ : Union[str, Any] = _get_word_embedding_weight(lowerCamelCase_, model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _get_word_embedding_weight(lowerCamelCase_, model.get_input_embeddings() )
lowerCamelCase__ : str = _get_word_embedding_weight(lowerCamelCase_, model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCamelCase__ : Dict = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0], lowerCamelCase_ )
# check that weights remain the same after resizing
lowerCamelCase__ : Union[str, Any] = True
                for pa, pb in zip(old_input_embeddings.value(), new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(pa - pb ) ) > 0:
                        lowerCamelCase__ : List[Any] = False
self.assertTrue(lowerCamelCase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0], lowerCamelCase_ )
lowerCamelCase__ : str = True
                    for pa, pb in zip(old_output_embeddings.value(), new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(pa - pb ) ) > 0:
                            lowerCamelCase__ : Tuple = False
self.assertTrue(lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
return tf.constant(_lowerCamelCase , dtype=tf.intaa )
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 99
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = tf.ones((4, 1), dtype=tf.intaa ) * 2
lowerCamelCase__ : Optional[int] = tf.concat([ids_tensor((4, 6), self.vocab_size - 3 ) + 3, eos_column_vector], axis=1 )
lowerCamelCase__ : str = input_ids.shape[0]
lowerCamelCase__ : List[str] = OPTConfig(
vocab_size=self.vocab_size, hidden_size=2_4, num_hidden_layers=2, num_attention_heads=2, ffn_dim=3_2, max_position_embeddings=4_8, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = TFOPTModel.from_pretrained('facebook/opt-350m' )
lowerCamelCase__ : Dict = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowerCamelCase__ : str = tf.not_equal(lowerCamelCase_, model.config.pad_token_id )
with tf.GradientTape():
lowerCamelCase__ : List[str] = model(input_ids=lowerCamelCase_, attention_mask=lowerCamelCase_ ).last_hidden_state
lowerCamelCase__ : Optional[Any] = (1, 1_1, 5_1_2)
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Any = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3], lowerCamelCase_, atol=4e-3 ) )
lowerCamelCase__ : str = tf.function(lowerCamelCase_, jit_compile=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = xla_generate(lowerCamelCase_, lowerCamelCase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3], lowerCamelCase_, atol=4e-2 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : Dict = 'facebook/opt-350m'
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCamelCase__ : Dict = GPTaTokenizer.from_pretrained(self.path_model )
lowerCamelCase__ : Tuple = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCamelCase__ : Dict = tokenizer(lowerCamelCase_, return_tensors='tf', padding=lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask )[0], axis=-1 )
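        # Note on the check above: OPT uses `</s>` (id 2) as its default BOS token, so
        # `add_special_tokens=False` is what keeps these prompts byte-identical to the
        # original Metaseq evaluation inputs.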
lowerCamelCase__ : Any = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-4 ) )
lowerCamelCase__ : int = tf.function(lowerCamelCase_, jit_compile=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask )[0], axis=-1 )
self.assertTrue(np.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-4 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
@property
def a__ (self ):
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = 'facebook/opt-125m'
lowerCamelCase__ : str = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[Any] = GPTaTokenizer.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Dict = TFOPTForCausalLM.from_pretrained(lowerCamelCase_ )
for prompt in self.prompts:
lowerCamelCase__ : int = tokenizer(lowerCamelCase_, return_tensors='tf' ).input_ids
lowerCamelCase__ : Union[str, Any] = model.generate(lowerCamelCase_, max_length=1_0 )
lowerCamelCase__ : List[Any] = tokenizer.batch_decode(lowerCamelCase_, skip_special_tokens=lowerCamelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = 'facebook/opt-350m'
lowerCamelCase__ : Dict = GPTaTokenizer.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = TFOPTForCausalLM.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = 'left'
# use different length sentences to test batching
lowerCamelCase__ : Union[str, Any] = [
'Hello, my dog is a little',
'Today, I',
]
lowerCamelCase__ : List[str] = tokenizer(lowerCamelCase_, return_tensors='tf', padding=lowerCamelCase_ )
lowerCamelCase__ : List[str] = inputs['input_ids']
lowerCamelCase__ : Optional[int] = model.generate(input_ids=lowerCamelCase_, attention_mask=inputs['attention_mask'] )
lowerCamelCase__ : List[str] = tokenizer(sentences[0], return_tensors='tf' ).input_ids
lowerCamelCase__ : Optional[Any] = model.generate(input_ids=lowerCamelCase_ )
lowerCamelCase__ : Any = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1], tf.intaa ) )
lowerCamelCase__ : List[str] = tokenizer(sentences[1], return_tensors='tf' ).input_ids
lowerCamelCase__ : Union[str, Any] = model.generate(input_ids=lowerCamelCase_, max_length=model.config.max_length - num_paddings )
lowerCamelCase__ : List[Any] = tokenizer.batch_decode(lowerCamelCase_, skip_special_tokens=lowerCamelCase_ )
lowerCamelCase__ : str = tokenizer.decode(output_non_padded[0], skip_special_tokens=lowerCamelCase_ )
lowerCamelCase__ : Tuple = tokenizer.decode(output_padded[0], skip_special_tokens=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, [non_padded_sentence, padded_sentence] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'facebook/opt-350m'
lowerCamelCase__ : Union[str, Any] = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : List[str] = GPTaTokenizer.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowerCamelCase_ )
for prompt in self.prompts:
lowerCamelCase__ : Tuple = tokenizer(lowerCamelCase_, return_tensors='tf' ).input_ids
lowerCamelCase__ : int = model.generate(lowerCamelCase_, max_length=1_0 )
lowerCamelCase__ : List[str] = tokenizer.batch_decode(lowerCamelCase_, skip_special_tokens=lowerCamelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
| 696 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
A_ : Optional[int] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
A_ : List[str] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
A_ : Tuple = BeautifulSoup(res.text, "html.parser")
A_ : Dict = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 696 | 1 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if principal <= 0:
raise Exception('Principal borrowed must be > 0' )
if rate_per_annum < 0:
raise Exception('Rate of interest must be >= 0' )
if years_to_repay <= 0 or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise Exception('Years to repay must be an integer > 0' )
# Yearly rate is divided by 12 to get monthly rate
lowerCamelCase__ : Dict = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowerCamelCase__ : Tuple = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
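# Worked example (illustrative figures, not asserted anywhere in this module): borrowing
# a principal of 100_000 at 10% per annum over 10 years gives a monthly rate of
# 0.10 / 12 and 120 monthly payments, for an EMI of roughly 1_321.5 per month.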
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase__ : Tuple = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_, cache_dir=lowerCamelCase_ )
lowerCamelCase__ : List[str] = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_, os.listdir(lowerCamelCase_ )[0], 'snapshots' ) )]
lowerCamelCase__ : Optional[int] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Any = 4
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : List[Any] = num_samples * [prompt]
lowerCamelCase__ : Optional[int] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : int = replicate(lowerCamelCase_ )
lowerCamelCase__ : Any = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : int = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
lowerCamelCase__ : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase_ ) == num_samples
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=lowerCamelCase_ )
lowerCamelCase__ : int = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : List[str] = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = shard(lowerCamelCase_ )
lowerCamelCase__ : str = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Union[str, Any] = 5_0
lowerCamelCase__ : Any = jax.device_count()
lowerCamelCase__ : Tuple = num_samples * [prompt]
lowerCamelCase__ : List[str] = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Any = replicate(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : int = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa )
lowerCamelCase__ : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Union[str, Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[Any] = 5_0
lowerCamelCase__ : Tuple = jax.device_count()
lowerCamelCase__ : Optional[int] = num_samples * [prompt]
lowerCamelCase__ : str = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Optional[int] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : List[str] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=lowerCamelCase_, steps_offset=1, )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, scheduler=lowerCamelCase_, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : List[str] = scheduler.create_state()
lowerCamelCase__ : int = scheduler_state
lowerCamelCase__ : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase__ : int = 5_0
lowerCamelCase__ : Optional[Any] = jax.device_count()
lowerCamelCase__ : Any = num_samples * [prompt]
lowerCamelCase__ : Any = pipeline.prepare_inputs(lowerCamelCase_ )
# shard inputs and rng
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = jax.random.split(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = shard(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(lowerCamelCase_, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
lowerCamelCase__ : int = jax.device_count()
lowerCamelCase__ : Dict = num_samples * [prompt]
lowerCamelCase__ : str = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, )
lowerCamelCase__ : Union[str, Any] = replicate(lowerCamelCase_ )
lowerCamelCase__ : Dict = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple = shard(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : int = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
lowerCamelCase__ , lowerCamelCase__ : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloataa, safety_checker=lowerCamelCase_, use_memory_efficient_attention=lowerCamelCase_, )
lowerCamelCase__ : Dict = replicate(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = pipeline.prepare_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = shard(lowerCamelCase_ )
lowerCamelCase__ : Any = pipeline(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, jit=lowerCamelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
lowerCamelCase__ : Any = images[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 696 | 1 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : int = len(_lowerCamelCase )
lowerCamelCase__ : int = len(_lowerCamelCase )
lowerCamelCase__ : int = (
first_str_length if first_str_length > second_str_length else second_str_length
)
lowerCamelCase__ : list = []
for char_count in range(_lowerCamelCase ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(_lowerCamelCase )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 696 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase_, scheduler=lowerCamelCase_ )
@torch.no_grad()
def __call__(self, lowerCamelCase_ = 1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = True, ):
'''simple docstring'''
if audio_length_in_s is None:
lowerCamelCase__ : str = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase__ : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase__ : str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f'''{audio_length_in_s} is too small. Make sure it\'s bigger than or equal to'''
                f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCamelCase__ : Dict = int(lowerCamelCase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase__ : Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCamelCase__ : Optional[Any] = int(lowerCamelCase_ )
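        # Worked example (assumed numbers, for illustration only): at a 22050 Hz sample rate,
        # a request for 3.0 s gives sample_size = 66150. With four up blocks the down-scale
        # factor is 2**4 = 16, and 66150 = 16 * 4134 + 6 is not divisible, so the size is
        # rounded up to (4134 + 1) * 16 = 66160 and trimmed back after denoising.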
lowerCamelCase__ : List[str] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase_, lowerCamelCase_ ) and len(lowerCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCamelCase__ : Union[str, Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=self.device, dtype=lowerCamelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase_, device=audio.device )
lowerCamelCase__ : int = self.scheduler.timesteps.to(lowerCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase__ : List[Any] = self.unet(lowerCamelCase_, lowerCamelCase_ ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCamelCase__ : List[str] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ).prev_sample
lowerCamelCase__ : Union[str, Any] = audio.clamp(-1, 1 ).float().cpu().numpy()
lowerCamelCase__ : Tuple = audio[:, :, :original_sample_size]
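        # Trim back to the originally requested length, dropping any samples that were only
        # added to satisfy the divisibility padding above.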
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase_ )
| 696 | 1 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
A_ : Dict = logging.get_logger(__name__)
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
        warnings.warn(
            '`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
            'instead.', FutureWarning, )
super().__init__(args=lowerCamelCase_, **lowerCamelCase_ )
| 696 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ :
'''simple docstring'''
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return None
class a_ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(lowerCamelCase_ ) )
vocab_file.flush()
lowerCamelCase__ : Tuple = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(lowerCamelCase_ ) ) )
model.save_pretrained(lowerCamelCase_ )
self._test_export(lowerCamelCase_, 'pt', 1_2, lowerCamelCase_ )
@require_tf
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Optional[Any] = self._test_export(lowerCamelCase_, 'tf', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Any = quantize(Path(lowerCamelCase_ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def a__ (self ):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase__ : Any = self._test_export(lowerCamelCase_, 'pt', 1_2, **lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = quantize(lowerCamelCase_ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(lowerCamelCase_ ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowerCamelCase__ : str = Path(lowerCamelCase_ ).joinpath('model.onnx' )
                # Remove folder if it exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ )
return path
except Exception as e:
self.fail(lowerCamelCase_ )
@require_torch
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import BertModel
lowerCamelCase__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'pt' )
@require_tf
@require_tokenizers
@slow
def a__ (self ):
'''simple docstring'''
from transformers import TFBertModel
lowerCamelCase__ : Dict = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowerCamelCase__ : Optional[int] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(lowerCamelCase_, lowerCamelCase_, 'tf' )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = FeatureExtractionPipeline(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = infer_shapes(lowerCamelCase_, lowerCamelCase_ )
# Assert all variables are present
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3], lowerCamelCase_ )
self.assertSequenceEqual(variable_names[3:], lowerCamelCase_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'], {0: 'batch'} )
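    # A hedged sketch (not part of this test suite) of how the dynamic axes checked above are
    # typically consumed downstream: they can be passed straight to `torch.onnx.export` so the
    # batch and sequence dimensions stay symbolic in the exported graph, e.g.
    #   torch.onnx.export(model, (tokens['input_ids'],), 'model.onnx',
    #                     input_names=['input_ids'], output_names=['output_0'],
    #                     dynamic_axes={'input_ids': {0: 'batch', 1: 'sequence'},
    #                                   'output_0': {0: 'batch', 1: 'sequence'}})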
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['input_ids', 'attention_mask', 'token_type_ids']
lowerCamelCase__ : Optional[int] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowerCamelCase__ , lowerCamelCase__ : str = ensure_valid_input(FuncContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(lowerCamelCase_ ), 3 )
# Should have exactly the same input names
self.assertEqual(set(lowerCamelCase_ ), set(lowerCamelCase_ ) )
        # Parameters should be reordered according to their respective places in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(lowerCamelCase_, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
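        # e.g. the tokenizer emits (input_ids, attention_mask, token_type_ids) while the
        # forward signature is (input_ids, token_type_ids, attention_mask), so the values are
        # re-sorted into positional order before export.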
        # Generated args are interleaved with other args (for instance parameter "past" in GPT2)
lowerCamelCase__ , lowerCamelCase__ : Any = ensure_valid_input(FuncNonContiguousArgs(), lowerCamelCase_, lowerCamelCase_ )
        # Should have exactly one arg (everything before the argument not provided, "some_other_args")
self.assertEqual(len(lowerCamelCase_ ), 1 )
self.assertEqual(len(lowerCamelCase_ ), 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens['input_ids'] )
self.assertEqual(ordered_input_names[0], 'input_ids' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ), '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix() )
| 696 | 1 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : Optional[Any] = use_input_mask
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : Optional[int] = embedding_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Tuple = scope
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : List[str] = None
if self.use_input_mask:
lowerCamelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Any = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = MobileBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Dict = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = MobileBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : str = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = MobileBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, next_sentence_label=lowerCamelCase_, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = MobileBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.num_labels
lowerCamelCase__ : int = MobileBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Optional[int] = MobileBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[Any] = model(lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_choices
lowerCamelCase__ : Dict = MobileBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase__ : int = model(
lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCamelCase__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = True
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase_ )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = MobileBertModelTester(self )
lowerCamelCase__ : List[str] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( _lowerCamelCase ):
    return torch.tensor(
        _lowerCamelCase , dtype=torch.long , device=torch_device , )
A_ : Tuple = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowerCamelCase_ )
lowerCamelCase__ : Tuple = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Optional[int] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape, lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = torch.tensor(
[
[
[-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
[-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
[2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
]
], device=lowerCamelCase_, )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 absolute difference, so an additive (absolute) tolerance is not a good way to measure closeness.
        # Here, we instead divide the expected result by the actual result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowerCamelCase__ : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowerCamelCase__ : Any = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
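    # Worked example of the ratio check above (numbers are illustrative): with an expected
    # value of 1.0e8 and an output of 1.0e8 + 1, the absolute difference is 1, far beyond any
    # small atol, while the ratio 1e8 / (1e8 + 1) ≈ 0.99999999 sits comfortably inside the
    # [1 - TOLERANCE, 1 + TOLERANCE] band.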
| 696 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
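        # The hint image is now an NCHW float tensor in [0, 1]: divide by 255 to normalize,
        # move channels first, and add a batch dimension, matching what the controlnet
        # conditioning branch expects.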
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
| 696 | 1 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 696 |
"""simple docstring"""
A_ : List[str] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
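# A minimal sketch of how a pin table like this is typically consumed by setup and
# runtime version checks; `_parse_pin` below is illustrative, not the library's
# actual helper:
import re
def _parse_pin(pin):
    # split e.g. "torch>=1.9,!=1.12.0" into ("torch", [">=1.9", "!=1.12.0"])
    match = re.match(r"^([^<>=!~]+?)\s*([<>=!~].*)?$", pin)
    name, spec = match.group(1), match.group(2) or ""
    return name, [c for c in spec.split(",") if c]
assert _parse_pin(A_["torch"]) == ("torch", [">=1.9", "!=1.12.0"])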
| 696 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
A_ : Tuple = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = BartphoTokenizer
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : List[Any] = True
def a__ (self ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : str = ['▁This', '▁is', '▁a', '▁t', 'est']
lowerCamelCase__ : Dict = dict(zip(lowerCamelCase_, range(len(lowerCamelCase_ ) ) ) )
lowerCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
lowerCamelCase__ : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['monolingual_vocab_file'] )
with open(self.monolingual_vocab_file, 'w', encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
lowerCamelCase__ : Optional[Any] = BartphoTokenizer(lowerCamelCase_, self.monolingual_vocab_file, **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def a__ (self, **lowerCamelCase_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = 'This is a là test'
lowerCamelCase__ : Any = 'This is a<unk><unk> test'
return input_text, output_text
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = BartphoTokenizer(lowerCamelCase_, self.monolingual_vocab_file, **self.special_tokens_map )
lowerCamelCase__ : List[str] = 'This is a là test'
lowerCamelCase__ : Dict = '▁This ▁is ▁a ▁l à ▁t est'.split()
lowerCamelCase__ : int = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = tokens + [tokenizer.unk_token]
lowerCamelCase__ : str = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ), lowerCamelCase_ )
| 696 |
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
A_ : Optional[int] = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over the RFC 3526 MODP groups above."""
    # Current minimum recommendation is a 2048-bit prime (group 14).
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError('Unsupported Group' )
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']
        self.__private_key = int(hexlify(urandom(32)), base=16)
    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]
    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]
    def is_valid_public_key(self, key: int) -> bool:
        # a key is valid if 2 <= key <= p - 2 and key^((p - 1) / 2) == 1 (mod p)
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )
    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('Invalid public key' )
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()
    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )
    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['prime']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('Invalid public key' )
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
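# A minimal two-party usage sketch for the class above; the doctest runner in the
# __main__ block below will execute it.
def _dh_demo() -> bool:
    """
    >>> _dh_demo()
    True
    """
    alice = DiffieHellman()
    bob = DiffieHellman()
    # each party publishes only its public key, yet both derive the same secret
    shared_a = alice.generate_shared_key(bob.generate_public_key())
    shared_b = bob.generate_shared_key(alice.generate_public_key())
    return shared_a == shared_b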
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Any = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 'longformer'
def __init__(self, lowerCamelCase_ = 5_1_2, lowerCamelCase_ = 2, lowerCamelCase_ = 1, lowerCamelCase_ = 0, lowerCamelCase_ = 2, lowerCamelCase_ = 3_0_5_2_2, lowerCamelCase_ = 7_6_8, lowerCamelCase_ = 1_2, lowerCamelCase_ = 1_2, lowerCamelCase_ = 3_0_7_2, lowerCamelCase_ = "gelu", lowerCamelCase_ = 0.1, lowerCamelCase_ = 0.1, lowerCamelCase_ = 5_1_2, lowerCamelCase_ = 2, lowerCamelCase_ = 0.02, lowerCamelCase_ = 1e-12, lowerCamelCase_ = False, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_, **lowerCamelCase_ )
lowerCamelCase__ : str = attention_window
lowerCamelCase__ : Union[str, Any] = sep_token_id
lowerCamelCase__ : List[str] = bos_token_id
lowerCamelCase__ : int = eos_token_id
lowerCamelCase__ : List[str] = vocab_size
lowerCamelCase__ : Optional[int] = hidden_size
lowerCamelCase__ : Union[str, Any] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : List[str] = hidden_act
lowerCamelCase__ : List[Any] = intermediate_size
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : List[str] = type_vocab_size
lowerCamelCase__ : Tuple = initializer_range
lowerCamelCase__ : Any = layer_norm_eps
lowerCamelCase__ : Dict = onnx_export
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ = "default", lowerCamelCase_ = None ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : Dict = True
@property
def a__ (self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Dict = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase__ : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = super().outputs
if self.task == "default":
lowerCamelCase__ : Optional[Any] = {0: 'batch'}
return outputs
@property
def a__ (self ):
'''simple docstring'''
return 1e-4
@property
def a__ (self ):
'''simple docstring'''
return max(super().default_onnx_opset, 1_4 )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = -1, lowerCamelCase_ = -1, lowerCamelCase_ = False, lowerCamelCase_ = None, ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = super().generate_dummy_inputs(
preprocessor=lowerCamelCase_, batch_size=lowerCamelCase_, seq_length=lowerCamelCase_, is_pair=lowerCamelCase_, framework=lowerCamelCase_ )
import torch
        # For some reason, replacing this code with inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly.
lowerCamelCase__ : Optional[Any] = torch.zeros_like(inputs['input_ids'] )
# make every second token global
lowerCamelCase__ : List[Any] = 1
return inputs
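# The two placeholder assignments at the end of `generate_dummy_inputs` above
# correspond to building a global-attention mask and marking every second token
# as global. A standalone sketch of that step (reconstructed from the surrounding
# comments, so treat it as an assumption rather than the library's exact code):
def _global_attention_mask_sketch(input_ids):
    import torch
    mask = torch.zeros_like(input_ids)
    mask[:, ::2] = 1  # every second token attends globally
    return mask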
| 696 |
"""simple docstring"""
def lowerCamelCase_ ( mass , velocity ):
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
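    # Worked example: a 10 kg mass moving at 5 m/s carries 0.5 * 10 * 5**2 = 125 J.
    assert lowerCamelCase_(10, 5) == 125.0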
| 696 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
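# `_LazyModule` replaces this module in `sys.modules` so the heavy imports above run
# only when an attribute is first touched. A standalone sketch of that idea (not
# transformers' actual implementation):
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value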
| 696 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : int = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : Optional[int] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : str = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : Optional[int] = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Tuple = c.n_embd + 1 # int
lowerCamelCase__ : Union[str, Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : List[Any] = not c.scale_attn_weights # bool
lowerCamelCase__ : List[Any] = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = PretrainedConfig()
lowerCamelCase__ : Optional[Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : Any = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : int = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Any = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Optional[Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : str = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : str = ['config.42.0.0.json']
lowerCamelCase__ : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Optional[int] = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase__ : Dict = 'v3.0.0'
lowerCamelCase__ : List[str] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
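# The versioned-config behaviour exercised above picks, among files named like
# `config.X.Y.Z.json`, the newest one whose version does not exceed the running
# transformers version, falling back to plain `config.json`. A rough standalone
# sketch of that selection rule (not transformers' actual implementation):
from packaging import version
def _pick_config_file(available, current_version):
    candidates = []
    for filename in available:
        if filename == "config.json":
            candidates.append((version.parse("0"), filename))
        elif filename.startswith("config.") and filename.endswith(".json"):
            candidates.append((version.parse(filename[len("config."):-len(".json")]), filename))
    eligible = [(v, f) for v, f in candidates if v <= version.parse(current_version)]
    return max(eligible)[1] if eligible else None
assert _pick_config_file(["config.json", "config.4.0.0.json"], "4.31.0") == "config.4.0.0.json"
assert _pick_config_file(["config.json", "config.42.0.0.json"], "4.31.0") == "config.json"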
| 696 | 1 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class a_ ( unittest.TestCase ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=7, lowerCamelCase_=4_0_0, lowerCamelCase_=2_0_0_0, lowerCamelCase_=1_0, lowerCamelCase_=1_6_0, lowerCamelCase_=8, lowerCamelCase_=0.0, lowerCamelCase_=4_0_0_0, lowerCamelCase_=False, lowerCamelCase_=True, ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = parent
lowerCamelCase__ : List[Any] = batch_size
lowerCamelCase__ : Dict = min_seq_length
lowerCamelCase__ : int = max_seq_length
lowerCamelCase__ : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCamelCase__ : Any = padding_value
lowerCamelCase__ : Dict = sampling_rate
lowerCamelCase__ : List[Any] = return_attention_mask
lowerCamelCase__ : List[str] = do_normalize
lowerCamelCase__ : Dict = feature_size
lowerCamelCase__ : Optional[int] = chunk_length
lowerCamelCase__ : Optional[int] = hop_length
def a__ (self ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ (self, lowerCamelCase_=False, lowerCamelCase_=False ):
'''simple docstring'''
def _flatten(lowerCamelCase_ ):
return list(itertools.chain(*lowerCamelCase_ ) )
if equal_length:
lowerCamelCase__ : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCamelCase__ : str = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
lowerCamelCase__ : List[str] = [np.asarray(lowerCamelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = WhisperFeatureExtractor if is_speech_available() else None
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = WhisperFeatureExtractionTester(self )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : Any = feat_extract_first.save_pretrained(lowerCamelCase_ )[0]
check_json_file_has_correct_format(lowerCamelCase_ )
lowerCamelCase__ : int = self.feature_extraction_class.from_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = feat_extract_first.to_dict()
lowerCamelCase__ : Optional[int] = feat_extract_second.to_dict()
lowerCamelCase__ : Union[str, Any] = feat_extract_first.mel_filters
lowerCamelCase__ : Tuple = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase_, lowerCamelCase_ ) )
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : Optional[Any] = os.path.join(lowerCamelCase_, 'feat_extract.json' )
feat_extract_first.to_json_file(lowerCamelCase_ )
lowerCamelCase__ : str = self.feature_extraction_class.from_json_file(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = feat_extract_first.to_dict()
lowerCamelCase__ : str = feat_extract_second.to_dict()
lowerCamelCase__ : str = feat_extract_first.mel_filters
lowerCamelCase__ : Dict = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase_, lowerCamelCase_ ) )
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase__ : List[Any] = [floats_list((1, x) )[0] for x in range(8_0_0, 1_4_0_0, 2_0_0 )]
lowerCamelCase__ : int = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase__ : Optional[int] = feature_extractor(lowerCamelCase_, padding='max_length', return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
lowerCamelCase__ : Tuple = feature_extractor(speech_inputs[0], return_tensors='np' ).input_features
lowerCamelCase__ : List[Any] = feature_extractor(np_speech_inputs[0], return_tensors='np' ).input_features
self.assertTrue(np.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
# Test batched
lowerCamelCase__ : List[str] = feature_extractor(lowerCamelCase_, return_tensors='np' ).input_features
lowerCamelCase__ : Dict = feature_extractor(lowerCamelCase_, return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_, lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase__ : str = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCamelCase__ : Optional[Any] = np.asarray(lowerCamelCase_ )
lowerCamelCase__ : Dict = feature_extractor(lowerCamelCase_, return_tensors='np' ).input_features
lowerCamelCase__ : Optional[Any] = feature_extractor(lowerCamelCase_, return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_, lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
# Test truncation required
lowerCamelCase__ : Any = [floats_list((1, x) )[0] for x in range(2_0_0, (feature_extractor.n_samples + 5_0_0), 2_0_0 )]
lowerCamelCase__ : Tuple = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
lowerCamelCase__ : Tuple = [x[: feature_extractor.n_samples] for x in speech_inputs]
lowerCamelCase__ : Dict = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs_truncated]
lowerCamelCase__ : Any = feature_extractor(lowerCamelCase_, return_tensors='np' ).input_features
lowerCamelCase__ : Union[str, Any] = feature_extractor(lowerCamelCase_, return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_, lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_, lowerCamelCase_, atol=1e-3 ) )
def a__ (self ):
'''simple docstring'''
import torch
lowerCamelCase__ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : List[Any] = np.random.rand(1_0_0, 3_2 ).astype(np.floataa )
lowerCamelCase__ : Optional[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase__ : Tuple = feature_extractor.pad([{'input_features': inputs}], return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowerCamelCase__ : List[Any] = feature_extractor.pad([{'input_features': inputs}], return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation' )
# automatic decoding with librispeech
lowerCamelCase__ : str = ds.sort('id' ).select(range(lowerCamelCase_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def a__ (self ):
'''simple docstring'''
        # fmt: off
        lowerCamelCase__ : Tuple = torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
lowerCamelCase__ : int = self._load_datasamples(1 )
lowerCamelCase__ : Optional[Any] = WhisperFeatureExtractor()
lowerCamelCase__ : int = feature_extractor(lowerCamelCase_, return_tensors='pt' ).input_features
self.assertEqual(input_features.shape, (1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0], lowerCamelCase_, atol=1e-4 ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : List[Any] = self._load_datasamples(1 )[0]
lowerCamelCase__ : Tuple = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
lowerCamelCase__ : Optional[int] = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=lowerCamelCase_ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase_ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase_ ) - 1 ) < 1e-3 ) )
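# The check above exercises plain per-sample zero-mean / unit-variance scaling.
# A standalone sketch of that computation (the 1e-7 floor guards against division
# by zero and is an assumption about the implementation):
def _zero_mean_unit_var_sketch(audio):
    audio = np.asarray(audio, dtype=np.float64)
    return (audio - audio.mean()) / np.sqrt(audio.var() + 1e-7)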
| 696 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : Dict = value_function
lowerCamelCase__ : int = unet
lowerCamelCase__ : Union[str, Any] = scheduler
lowerCamelCase__ : int = env
lowerCamelCase__ : List[Any] = env.get_dataset()
lowerCamelCase__ : Dict = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[int] = {}
for key in self.data.keys():
try:
lowerCamelCase__ : Tuple = self.data[key].std()
except: # noqa: E722
pass
lowerCamelCase__ : Optional[Any] = env.observation_space.shape[0]
lowerCamelCase__ : List[str] = env.action_space.shape[0]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if type(lowerCamelCase_ ) is dict:
return {k: self.to_torch(lowerCamelCase_ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase_ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase_, device=self.unet.device )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for key, val in cond.items():
lowerCamelCase__ : Optional[Any] = val.clone()
return x_in
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = x.shape[0]
lowerCamelCase__ : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowerCamelCase__ : Dict = torch.full((batch_size,), lowerCamelCase_, device=self.unet.device, dtype=torch.long )
for _ in range(lowerCamelCase_ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowerCamelCase__ : str = self.value_function(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample
lowerCamelCase__ : Union[str, Any] = torch.autograd.grad([y.sum()], [x] )[0]
lowerCamelCase__ : Optional[int] = self.scheduler._get_variance(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = torch.exp(0.5 * posterior_variance )
lowerCamelCase__ : Tuple = model_std * grad
lowerCamelCase__ : str = 0
lowerCamelCase__ : Dict = x.detach()
lowerCamelCase__ : Dict = x + scale * grad
lowerCamelCase__ : Optional[int] = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : Tuple = self.unet(x.permute(0, 2, 1 ), lowerCamelCase_ ).sample.permute(0, 2, 1 )
# TODO: verify deprecation of this kwarg
lowerCamelCase__ : Optional[Any] = self.scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, predict_epsilon=lowerCamelCase_ )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowerCamelCase__ : Any = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
return x, y
def __call__(self, lowerCamelCase_, lowerCamelCase_=6_4, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=0.1 ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.normalize(lowerCamelCase_, 'observations' )
lowerCamelCase__ : List[str] = obs[None].repeat(lowerCamelCase_, axis=0 )
lowerCamelCase__ : str = {0: self.to_torch(lowerCamelCase_ )}
lowerCamelCase__ : Optional[Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowerCamelCase__ : List[Any] = randn_tensor(lowerCamelCase_, device=self.unet.device )
lowerCamelCase__ : int = self.reset_xa(lowerCamelCase_, lowerCamelCase_, self.action_dim )
lowerCamelCase__ : List[str] = self.to_torch(lowerCamelCase_ )
# run the diffusion process
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.run_diffusion(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# sort output trajectories by value
lowerCamelCase__ : Union[str, Any] = y.argsort(0, descending=lowerCamelCase_ ).squeeze()
lowerCamelCase__ : List[str] = x[sorted_idx]
lowerCamelCase__ : Optional[Any] = sorted_values[:, :, : self.action_dim]
lowerCamelCase__ : Union[str, Any] = actions.detach().cpu().numpy()
lowerCamelCase__ : Union[str, Any] = self.de_normalize(lowerCamelCase_, key='actions' )
# select the action with the highest value
if y is not None:
lowerCamelCase__ : str = 0
else:
# if we didn't run value guiding, select a random action
lowerCamelCase__ : Optional[Any] = np.random.randint(0, lowerCamelCase_ )
lowerCamelCase__ : Tuple = denorm_actions[selected_index, 0]
return denorm_actions
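# A rough usage sketch for the planner above (the environment and the pretrained
# sub-models are assumptions; upstream this is the value-guided RL pipeline):
#
#     env = gym.make("hopper-medium-v2")              # any d4rl-style env with get_dataset()
#     planner = a_(value_function, unet, scheduler, env)
#     obs = env.reset()
#     for _ in range(100):
#         action = planner(obs, 64, 32, 2, 0.1)       # batch, horizon, guide steps, scale
#         obs, reward, done, _ = env.step(action)
#         if done:
#             break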
| 696 | 1 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def lowerCamelCase_ ( vectors , noofclusters ):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First let's ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder('float64' , [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder('int32')
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value))
        ##Now let's construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder('float' , [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input , 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder('float' , [dim])
        vb = tf.placeholder('float' , [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va , vb) , 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder('float' , [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances , 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist , feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
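# A rough usage sketch. The function above targets a very old TensorFlow API
# (tf.Session, tf.sub, tf.initialize_all_variables), so running it today would
# require those ops renamed or `tensorflow.compat.v1`; the sample points are arbitrary:
#
#     points = array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [8.5, 9.0]])
#     centroids, assignments = lowerCamelCase_(points, 2)
#     print(centroids)     # one centre near (1.25, 1.5), one near (8.25, 8.5)
#     print(assignments)   # e.g. [0, 0, 1, 1] (cluster labels may be permuted)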
| 696 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text ):
    single_char_strings , two_char_strings = analyze_text(text)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f'''{round(-1 * my_sec_sum ):.1f}''' )
    # print the difference between them
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def analyze_text( text ):
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main():
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
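    # Worked example of the first-order term `calculate_prob` prints: for the
    # four-character text "aab " the counts are {'a': 2, 'b': 1, ' ': 1}, so
    # H1 = -(1/2)*log2(1/2) - 2*(1/4)*log2(1/4) = 1.5 bits per character.
    assert -sum(p * math.log2(p) for p in (0.5, 0.25, 0.25)) == 1.5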
| 696 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path)
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
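# Typical invocation of this conversion script (the file names are illustrative):
#
#     python convert_bert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path bert_model.ckpt \
#         --bert_config_file bert_config.json \
#         --pytorch_dump_path pytorch_model.bin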
| 696 |
"""simple docstring"""
import os
def solution():
    with open(os.path.dirname(__file__) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('"' , '' ).split(',' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
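    # Worked example from the problem statement: "COLIN" is worth
    # 3 + 15 + 12 + 9 + 14 = 53 and is the 938th name once sorted, so it
    # contributes 938 * 53 = 49714 to the total.
    assert sum(ord(letter) - 64 for letter in 'COLIN') * 938 == 49714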
| 696 | 1 |