import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
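# The fast tests below build the pipeline from tiny, randomly initialized
# components (a small UNet, VAE, and CLIP text encoder) so that every case runs
# on CPU in seconds; the hard-coded expected slices act as regression checks
# against those fixed seeds.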
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        # Encode the image inputs to latents so we can compare against the image code path.
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generates a different result from passing an image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
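# A worked example for `trim_batch` (illustrative values, not from the original file):
# with pad_token_id = 0 and
#     input_ids = torch.tensor([[5, 6, 0, 0],
#                               [7, 0, 0, 0]])
# columns 2 and 3 contain only padding, so trim_batch(input_ids, 0) keeps the
# first two columns and returns tensor([[5, 6], [7, 0]]).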
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
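# A worked example (illustrative strings, not from the original file):
# f1_score("a cat sat", "the cat on the mat") normalizes both sides to the token
# lists ["cat", "sat"] and ["cat", "on", "mat"], so num_same = 1,
# precision = 1/2, recall = 1/3, and F1 = 2 * (1/2) * (1/3) / (1/2 + 1/3) = 0.4.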
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        # Locate the test script shipped with accelerate.test_utils so it can be spawned on TPU cores.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample",
            use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """A priority queue with a fixed number of priority levels (0 is highest)."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self):
        # Serve the non-empty queue with the highest priority first.
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """A priority queue in which the smallest element has the highest priority."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self):
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self):
        return str(self.queue)


def fixed_priority_queue():
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue():
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
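# Both queues above pay O(n) per dequeue (list.pop(0), or min() plus remove()).
# A minimal alternative sketch using the standard library's heapq, which gives
# O(log n) enqueue/dequeue; this class is an illustration, not part of the
# original module:
import heapq


class HeapPriorityQueue:
    """Smallest-element-first queue backed by a binary heap."""

    def __init__(self):
        self._heap = []

    def enqueue(self, data):
        heapq.heappush(self._heap, data)

    def dequeue(self):
        if not self._heap:
            raise UnderFlowError("The queue is empty")
        return heapq.heappop(self._heap)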
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)
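# If torch or transformers is missing, the block below falls back to dummy
# placeholder objects so that importing diffusers still succeeds and a helpful
# error is raised only when the pipeline is actually used.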
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")
if __name__ == "__main__":
    main()
def reverse_long_words(sentence: str) -> str:
    """
    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join("".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """dpr"""
def __init__( self , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0 , lowercase="absolute" , lowercase = 0 , **lowercase , ):
super().__init__(pad_token_id=lowercase , **lowercase )
_lowerCamelCase : int = vocab_size
_lowerCamelCase : Optional[int] = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : Tuple = attention_probs_dropout_prob
_lowerCamelCase : Union[str, Any] = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Optional[int] = layer_norm_eps
_lowerCamelCase : Dict = projection_dim
_lowerCamelCase : int = position_embedding_type | 630 | 0 |
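# A minimal usage sketch (hypothetical sizes, not the defaults above):
#
#     config = DPRConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
#     config.projection_dim  # -> 0, i.e. no extra projection over the pooled output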
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
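# A minimal usage sketch with `generate` (`model` and `input_ids` are
# placeholders, not objects from this test file):
#
#     criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#     output_ids = model.generate(input_ids, stopping_criteria=criteria)
#
# `generate` evaluates each criterion on (input_ids, scores) after every step
# and stops as soon as one of them returns True.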
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """
    Zero-shot audio classification pipeline: scores an audio input against a set
    of caller-provided `candidate_labels`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
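# A minimal usage sketch (the checkpoint is an example of a CLAP-style model
# this pipeline targets, not something referenced in this file):
#
#     from transformers import pipeline
#
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier("audio.wav", candidate_labels=["dog barking", "vacuum cleaner"])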
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Apply the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.

    Return the differences or overwrites the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def a__ ( snake_case = False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = glob.glob(os.path.join(snake_case , '''**/*.py''' ) , recursive=snake_case )
__SCREAMING_SNAKE_CASE : Tuple = []
for filename in all_files:
__SCREAMING_SNAKE_CASE : int = is_copy_consistent(snake_case , snake_case )
diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(snake_case ) > 0:
__SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(snake_case )
raise Exception(
'''Found the following copy inconsistencies:\n'''
+ diff
+ '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowercase_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
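# Hedged usage sketch (not part of the original utility): assuming the regexes
# and helpers above are defined as in `utils/check_copies.py`, a one-off check
# of a single (hypothetical) file could look like:
#
#     diffs = is_copy_consistent("src/diffusers/models/attention.py", overwrite=False)
#     for object_name, line_number in diffs:
#         print(f"{object_name} diverges starting at line {line_number}")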
| 74 | 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as UperNet does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip(reason='UperNet does not have tied weights' )
    def test_tied_model_weights_key_ignore(self):
        pass
@slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 509 |
"""simple docstring"""
def present_value(discount_rate: float, cash_flows: list) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
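# Worked example (comment only): with discount_rate=0.10 and
# cash_flows=[-1000, 500, 500, 500], enumerate() starts at i=0, so the first
# cash flow is treated as occurring now (undiscounted):
#     -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3
#     = -1000 + 454.55 + 413.22 + 375.66 ~= 243.43
# so present_value(0.10, [-1000, 500, 500, 500]) returns 243.43.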
if __name__ == "__main__":
import doctest
doctest.testmod()
| 509 | 1 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
__magic_name__ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
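# Example invocation (script name and paths are placeholders, not real files):
#     python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path ./WavLM-Base.pt \
#         --pytorch_dump_folder_path ./wavlm-base-converted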
| 276 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 276 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self, A, A, A="<s>", A="</s>", A="</s>", A="<s>", A="<unk>", A="<pad>", A="<mask>", A = None, **A, ):
"""simple docstring"""
_UpperCamelCase = AddedToken(A, lstrip=A, rstrip=A ) if isinstance(A, A ) else mask_token
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A, eos_token=A, unk_token=A, sep_token=A, cls_token=A, pad_token=A, mask_token=A, sp_model_kwargs=self.sp_model_kwargs, **A, )
_UpperCamelCase = vocab_file
_UpperCamelCase = monolingual_vocab_file
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_UpperCamelCase = {}
_UpperCamelCase = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(A ) not in self.fairseq_tokens_to_ids:
_UpperCamelCase = cnt
cnt += 1
with open(A, '''r''', encoding='''utf-8''' ) as f:
for line in f.readlines():
_UpperCamelCase = line.strip().split()[0]
_UpperCamelCase = len(self.fairseq_tokens_to_ids )
if str(A ) not in self.fairseq_tokens_to_ids:
_UpperCamelCase = len(self.fairseq_tokens_to_ids )
_UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
"""simple docstring"""
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
_UpperCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self, A ):
"""simple docstring"""
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
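# Note on the special-token layout built above: single sequences are encoded as
# `<s> A </s>` and pairs as `<s> A </s></s> B </s>` (RoBERTa-style), which is
# exactly what `build_inputs_with_special_tokens` returns.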
| 716 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # character-level tokenization: each character becomes its own token
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
| 105 | 0 |
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # deprecated import shim: emit the warning once, at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 234 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 234 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        # move the randomly chosen pivot to the end before partitioning
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
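# Rough sanity check (an expectation for randomized quicksort, not part of the
# original script): the expected number of comparisons is about 2 * n * ln(n),
# i.e. roughly 2 * 100 * ln(100) ~= 921 for the 100-element run below, so the
# count `z` printed at the end should usually land near the high hundreds.
#
#     import math
#     expected = 2 * 100 * math.log(100)  # ~921 comparisons in expectation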
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted

mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
| 326 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 326 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 5_12,
"distilbert-base-uncased-distilled-squad": 5_12,
"distilbert-base-cased": 5_12,
"distilbert-base-cased-distilled-squad": 5_12,
"distilbert-base-german-cased": 5_12,
"distilbert-base-multilingual-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 143 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 143 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
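# Usage sketch, mirroring the standard config pattern:
#
#     from transformers import LiltConfig, LiltModel
#     configuration = LiltConfig()        # default-style configuration
#     model = LiltModel(configuration)    # randomly initialized model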
| 717 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 371 | 0 |
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
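# Quick sanity check (comment only): for the straight line f(x) = x between 0
# and 1, every chord lies on the curve, so line_length(lambda x: x, 0, 1)
# should return sqrt(2) ~= 1.4142 regardless of the step count.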
if __name__ == "__main__":
    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")

    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
| 39 |
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect
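# Worked example (comment only): apparent_power(100, 5, 30, -30) multiplies
# 100∠30° by 5∠-30°; multiplying complex numbers adds their angles, giving
# 500∠0° = (500+0j). Note this implementation multiplies V by I directly
# rather than by the conjugate of I, matching the original snippet.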
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 39 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase =["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
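

# The module above defers its heavy imports via transformers' _LazyModule. A
# minimal standalone sketch of the same idea (simplified, hypothetical names;
# not the transformers implementation):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # the submodule is imported only on first attribute access,
        # so `import package` itself stays cheap
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)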
| 543 |
import torch

from diffusers import StableDiffusionPipeline

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
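
# For reproducible samples, a torch.Generator can be passed explicitly; this is
# a sketch reusing the `pipe` and `prompt` objects defined above.
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]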
| 543 | 1 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
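

# Round-trip example for the Vigenere cipher above: decrypting with the same key
# recovers the original message, and non-letter characters pass through unchanged.
if __name__ == "__main__":
    ciphertext = encrypt_message("HDARJI", "This is Harshil Darji from Dharmaj.")
    assert decrypt_message("HDARJI", ciphertext) == "This is Harshil Darji from Dharmaj."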
| 507 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    """Configuration class to store the configuration of an LXMERT model."""

    model_type = "lxmert"
    attribute_map = {}

    def __init__(self, vocab_size=30_522, hidden_size=768, num_attention_heads=12, num_qa_labels=9_500, num_object_labels=1_600, num_attr_labels=400, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2_048, visual_pos_dim=4, visual_loss_normalizer=6.67, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs) -> None:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
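

# Quick usage sketch for the config above: with the defaults, num_hidden_layers
# is a per-modality dict built from the language/cross-encoder/vision layer counts.
if __name__ == "__main__":
    config = LxmertConfig()
    print(config.hidden_size)        # 768
    print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}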
| 469 | 0 |
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50_000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
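

# RoFormer's distinguishing feature, exercised indirectly by the tests above, is
# the rotary position embedding. A minimal numpy sketch of the underlying rotation
# (illustrative only; not the transformers implementation):
import numpy as np


def rotate_pair(x: np.ndarray, position: int, theta: float = 10000.0) -> np.ndarray:
    """Rotate each (even, odd) feature pair of x by an angle set by the token position."""
    angle = position / theta
    cos, sin = np.cos(angle), np.sin(angle)
    x1, x2 = x[..., 0], x[..., 1]
    return np.stack([x1 * cos - x2 * sin, x1 * sin + x2 * cos], axis=-1)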
| 72 |
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=50_244, hidden_size=768, d_kv=64, d_ff=2_048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2_048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4_096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
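

# Composing the two sub-configs above into the composite config; a small usage
# sketch assuming the classes defined in this file:
if __name__ == "__main__":
    text_config = Pix2StructTextConfig(num_layers=2, num_heads=2)
    vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2)
    config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
    print(config.text_config.num_layers)  # 2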
| 72 | 1 |
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
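

# The thresholding in both forward passes above reduces to "cosine similarity
# minus a per-concept threshold, flagged when positive". A sketch on random
# stand-in embeddings (the shapes and the 0.5 threshold are illustrative only):
if __name__ == "__main__":
    dummy_image_embeds = torch.randn(2, 768)
    dummy_concept_embeds = torch.randn(17, 768)
    scores = cosine_distance(dummy_image_embeds, dummy_concept_embeds)  # (2, 17) similarity matrix
    flagged = torch.any(scores - 0.5 > 0, dim=1)  # one boolean per image
    print(scores.shape, flagged)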
| 311 |
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
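

# The tests above all follow the standard diffusers sampling loop; a compact
# sketch with the scheduler alone, using zeros as a stand-in for the denoising model:
if __name__ == "__main__":
    scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
    scheduler.set_timesteps(10)

    sample = torch.ones(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = torch.zeros_like(model_input)  # a real pipeline would call the UNet here
        sample = scheduler.step(model_output, t, sample).prev_sample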
| 311 | 1 |
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of a string by placing each of them in a grid
    (whose height depends on the key) in a zigzag formation and reading it
    left to right."""
    temp_grid = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generates a template based on the key, fills it with the characters of
    the input string, and then reads it back in a zigzag formation."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
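

# Round-trip example for the rail fence (zigzag) cipher above:
if __name__ == "__main__":
    secret = encrypt("Hello World", 4)
    print(secret)  # "HWe olordll"
    assert decrypt(secret, 4) == "Hello World"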
| 235 |
def check_cycle(graph: dict) -> bool:
    """Returns True if the directed graph contains a cycle."""
    # Keep track of all visited nodes
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recur for all neighbours; a neighbour already on the recursion stack means a back edge."""
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
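

# Usage example for the cycle check above: in the first graph the edge 2 -> 0
# closes a cycle and 3 -> 3 is a self-loop; the second graph is a DAG.
if __name__ == "__main__":
    cyclic = {0: [1, 2], 1: [2], 2: [0, 3], 3: [3], 4: []}
    acyclic = {0: [1, 2], 1: [2], 2: [3], 3: []}
    assert check_cycle(cyclic) is True
    assert check_cycle(acyclic) is False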
| 235 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = 1
_lowerCamelCase : int = 3
_lowerCamelCase : Tuple = (3_2, 3_2)
_lowerCamelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCAmelCase )
return image
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : str = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : Dict = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : Any = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(__lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
def extract(*__lowerCAmelCase : Tuple , **__lowerCAmelCase : Tuple ):
class __snake_case :
def __init__( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Any = torch.ones([0] )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : List[Any] ):
"""simple docstring"""
self.pixel_values.to(__lowerCAmelCase )
return self
return Out()
return extract
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : str = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : Any = self.dummy_cond_unet
_lowerCamelCase : int = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.dummy_vae
_lowerCamelCase : List[str] = self.dummy_text_encoder
_lowerCamelCase : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
_lowerCamelCase : Union[str, Any] = 7_7
_lowerCamelCase : List[Any] = self.dummy_image.to(__lowerCAmelCase )
_lowerCamelCase : List[str] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_lowerCamelCase : int = AltDiffusionImgaImgPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
_lowerCamelCase : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = alt_pipe.to(__lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Dict = "A painting of a squirrel eating a burger"
_lowerCamelCase : List[str] = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
_lowerCamelCase : int = alt_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=__lowerCAmelCase , )
_lowerCamelCase : Dict = output.images
_lowerCamelCase : Any = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
_lowerCamelCase : Union[str, Any] = alt_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=__lowerCAmelCase , return_dict=__lowerCAmelCase , )[0]
_lowerCamelCase : List[Any] = image[0, -3:, -3:, -1]
_lowerCamelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase : List[str] = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.dummy_cond_unet
_lowerCamelCase : str = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
_lowerCamelCase : int = self.dummy_vae
_lowerCamelCase : int = self.dummy_text_encoder
_lowerCamelCase : List[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
_lowerCamelCase : Tuple = 7_7
_lowerCamelCase : Any = self.dummy_image.to(__lowerCAmelCase )
# put models in fp16
_lowerCamelCase : List[str] = unet.half()
_lowerCamelCase : Union[str, Any] = vae.half()
_lowerCamelCase : Union[str, Any] = bert.half()
# make sure here that pndm scheduler skips prk
_lowerCamelCase : Optional[Any] = AltDiffusionImgaImgPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
_lowerCamelCase : int = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = alt_pipe.to(__lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : int = "A painting of a squirrel eating a burger"
_lowerCamelCase : List[Any] = torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = alt_pipe(
[prompt] , generator=__lowerCAmelCase , num_inference_steps=2 , output_type='''np''' , image=__lowerCAmelCase , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
_lowerCamelCase : List[str] = init_image.resize((7_6_0, 5_0_4) )
_lowerCamelCase : Any = "BAAI/AltDiffusion"
_lowerCamelCase : Optional[int] = AltDiffusionImgaImgPipeline.from_pretrained(
__lowerCAmelCase , safety_checker=__lowerCAmelCase , )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_lowerCamelCase : Optional[Any] = "A fantasy landscape, trending on artstation"
_lowerCamelCase : List[Any] = torch.manual_seed(0 )
_lowerCamelCase : Union[str, Any] = pipe(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=__lowerCAmelCase , output_type='''np''' , )
_lowerCamelCase : Any = output.images[0]
_lowerCamelCase : Optional[int] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
_lowerCamelCase : List[Any] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_lowerCamelCase : Union[str, Any] = init_image.resize((7_6_8, 5_1_2) )
_lowerCamelCase : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
_lowerCamelCase : Dict = "BAAI/AltDiffusion"
_lowerCamelCase : str = AltDiffusionImgaImgPipeline.from_pretrained(
__lowerCAmelCase , safety_checker=__lowerCAmelCase , )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_lowerCamelCase : Optional[int] = "A fantasy landscape, trending on artstation"
_lowerCamelCase : Optional[Any] = torch.manual_seed(0 )
_lowerCamelCase : List[str] = pipe(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=__lowerCAmelCase , output_type='''np''' , )
_lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 83 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights to our ViLT structure.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids, pixel_values=encoding_1.pixel_values, pixel_values_2=encoding_2.pixel_values)
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model and processor to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 46 | 0 |
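
# Invoking the conversion above from Python instead of the CLI; the checkpoint URL
# is the script's own default, the output directory is hypothetical. Left commented
# out because it downloads a large checkpoint:
#
# convert_vilt_checkpoint(
#     checkpoint_url="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
#     pytorch_dump_folder_path="./vilt-b32-mlm-itm",
# )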
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"))
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="quick_gelu", projection_dim=512)
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)


@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np").images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy")
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png")

        upscaled_image = upscaler(
            prompt=prompt, image=image, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np").images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy")
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
| 92 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
a_ = logging.get_logger(__name__)
def UpperCamelCase_ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case_ : List[Any] = r"\w+[.]\d+"
snake_case_ : Tuple = re.findall(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
for pat in pats:
snake_case_ : Union[str, Any] = key.replace(__SCREAMING_SNAKE_CASE, "_".join(pat.split("." ) ) )
return key
def UpperCamelCase_ ( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case_ : Tuple = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
snake_case_ : int = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
snake_case_ : Dict = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
snake_case_ : List[Any] = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
snake_case_ : List[str] = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
snake_case_ : str = pt_tensor.transpose(2, 3, 1, 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
snake_case_ : str = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
snake_case_ : Any = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
snake_case_ : Union[str, Any] = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
snake_case_ : Union[str, Any] = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCamelCase_ ( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE=4_2 ):
"""simple docstring"""
snake_case_ : int = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
snake_case_ : Optional[int] = flax_model.init_weights(PRNGKey(__SCREAMING_SNAKE_CASE ) )
snake_case_ : Union[str, Any] = flatten_dict(__SCREAMING_SNAKE_CASE )
snake_case_ : str = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case_ : List[str] = rename_key(__SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
snake_case_ , snake_case_ : Dict = rename_key_and_reshape_tensor(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
        # also add the unexpected weight so that a warning is thrown
snake_case_ : Any = jnp.asarray(__SCREAMING_SNAKE_CASE )
return unflatten_dict(__SCREAMING_SNAKE_CASE )
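# A hedged usage sketch, kept as comments (the key below is illustrative, not from the
# original module): `rename_key` rewrites indexed PyTorch module names so they match
# Flax's flattened layout, e.g.
#   rename_key("encoder.layers.0.attention.weight") -> "encoder.layers_0.attention.weight"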
| 92 | 1 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase ) -> str:
"""simple docstring"""
__UpperCAmelCase : int = []
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : Union[str, Any] = {
"^": 3,
"*": 2,
"/": 2,
"%": 2,
"+": 1,
"-": 1,
} # Priority of each operator
__UpperCAmelCase : List[Any] = len(UpperCamelCase ) if (len(UpperCamelCase ) > 7) else 7
# Print table header for output
print(
"Symbol".center(8 ) , "Stack".center(UpperCamelCase ) , "Postfix".center(UpperCamelCase ) , sep=" | " , )
print("-" * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(UpperCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(UpperCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(UpperCamelCase ) == 0:
stack.append(UpperCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
                while len(UpperCamelCase ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(UpperCamelCase ) # push x to stack
print(
x.center(8 ) , ("".join(UpperCamelCase )).ljust(UpperCamelCase ) , ("".join(UpperCamelCase )).ljust(UpperCamelCase ) , sep=" | " , ) # Output in tabular format
while len(UpperCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
" ".center(8 ) , ("".join(UpperCamelCase )).ljust(UpperCamelCase ) , ("".join(UpperCamelCase )).ljust(UpperCamelCase ) , sep=" | " , ) # Output in tabular format
return "".join(UpperCamelCase ) # return Postfix as str
def _UpperCamelCase ( UpperCamelCase ) -> Optional[Any]:
"""simple docstring"""
__UpperCAmelCase : Any = list(infix[::-1] ) # reverse the infix equation
for i in range(len(UpperCamelCase ) ):
if infix[i] == "(":
__UpperCAmelCase : Optional[int] = ")" # change "(" to ")"
elif infix[i] == ")":
__UpperCAmelCase : int = "(" # change ")" to "("
return (infix_2_postfix("".join(UpperCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
A = input("""\nEnter an Infix Equation = """) # Input an Infix equation
A = """""".join(Infix.split()) # Remove spaces from the input
print("""\n\t""", Infix, """(Infix) -> """, infix_2_prefix(Infix), """(Prefix)""")
| 77 |
'''simple docstring'''
from __future__ import annotations
a_ : int = list[list[int]]
# assigning initial values to the grid
a_ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
a_ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def a_ ( __snake_case : Matrix , __snake_case : int , __snake_case : int , __snake_case : int ) -> bool:
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def a_ ( __snake_case : Matrix ) -> tuple[int, int] | None:
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def a_ ( __snake_case : Matrix ) -> Matrix | None:
"""simple docstring"""
if location := find_empty_location(__snake_case ):
lowerCamelCase_, lowerCamelCase_ =location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(__snake_case , __snake_case , __snake_case , __snake_case ):
lowerCamelCase_ =digit
if sudoku(__snake_case ) is not None:
return grid
lowerCamelCase_ =0
return None
def a_ ( __snake_case : Matrix ) -> None:
"""simple docstring"""
for row in grid:
for cell in row:
print(__snake_case , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
a_ : Union[str, Any] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 676 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
UpperCAmelCase = inspect.getfile(accelerate.test_utils )
UpperCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCAmelCase = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] )
UpperCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] )
@require_multi_gpu
def _UpperCamelCase ( self ):
print(F'''Found {torch.cuda.device_count()} devices.''' )
UpperCAmelCase = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase ,env=os.environ.copy() )
@require_multi_gpu
def _UpperCamelCase ( self ):
print(F'''Found {torch.cuda.device_count()} devices.''' )
UpperCAmelCase = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(F'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase ,env=os.environ.copy() )
@require_multi_gpu
def _UpperCamelCase ( self ):
UpperCAmelCase = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase ,env=os.environ.copy() )
@require_multi_gpu
def _UpperCamelCase ( self ):
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
UpperCAmelCase = ["""torchrun""", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 ,cuda_visible_devices="""0,1""" ):
execute_subprocess_async(__lowercase ,env=os.environ.copy() )
if __name__ == "__main__":
_UpperCamelCase = Accelerator()
_UpperCamelCase = (accelerator.state.process_index + 2, 10)
_UpperCamelCase = torch.randint(0, 10, shape).to(accelerator.device)
_UpperCamelCase = """"""
_UpperCamelCase = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_UpperCamelCase = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_UpperCamelCase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
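# Hedged note on the checks above: `pad_across_processes` grows every process's
# tensor along dim 0 to the largest size seen across processes (here
# num_processes + 1), filling with zeros at the end by default, or at the front
# when pad_first=True.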
| 715 |
"""simple docstring"""
_UpperCamelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_UpperCamelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 74 | 0 |
'''simple docstring'''
def a ( UpperCamelCase_ : Optional[Any] ) -> Optional[Any]:
snake_case__ =len(UpperCamelCase_ )
for i in range(length - 1 ):
snake_case__ =i
for k in range(i + 1 , UpperCamelCase_ ):
if collection[k] < collection[least]:
snake_case__ =k
if least != i:
snake_case__ , snake_case__ =(collection[i], collection[least])
return collection
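# Hedged note (added comment): selection sort performs O(n^2) comparisons but at most
# n - 1 swaps, e.g. sorting [3, 1, 2] selects 1, then 2, leaving [1, 2, 3].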
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = input('''Enter numbers separated by a comma:\n''').strip()
SCREAMING_SNAKE_CASE__ : str = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 538 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
enable_full_determinism()
class a__( snake_case__ , snake_case__ , unittest.TestCase ):
a_ : Dict = UNetaDModel
a_ : List[Any] = '''sample'''
@property
def _lowercase ( self ) -> Tuple:
snake_case__ =4
snake_case__ =3
snake_case__ =(32, 32)
snake_case__ =floats_tensor((batch_size, num_channels) + sizes ).to(_UpperCAmelCase )
snake_case__ =torch.tensor([10] ).to(_UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def _lowercase ( self ) -> Optional[int]:
return (3, 32, 32)
@property
def _lowercase ( self ) -> Optional[int]:
return (3, 32, 32)
def _lowercase ( self ) -> Union[str, Any]:
snake_case__ ={
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
snake_case__ =self.dummy_input
return init_dict, inputs_dict
class a__( snake_case__ , snake_case__ , unittest.TestCase ):
a_ : Union[str, Any] = UNetaDModel
a_ : Optional[Any] = '''sample'''
@property
def _lowercase ( self ) -> Union[str, Any]:
snake_case__ =4
snake_case__ =4
snake_case__ =(32, 32)
snake_case__ =floats_tensor((batch_size, num_channels) + sizes ).to(_UpperCAmelCase )
snake_case__ =torch.tensor([10] ).to(_UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def _lowercase ( self ) -> Optional[int]:
return (4, 32, 32)
@property
def _lowercase ( self ) -> Dict:
return (4, 32, 32)
def _lowercase ( self ) -> str:
snake_case__ ={
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
snake_case__ =self.dummy_input
return init_dict, inputs_dict
def _lowercase ( self ) -> Dict:
snake_case__ , snake_case__ =UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(_UpperCAmelCase )
snake_case__ =model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def _lowercase ( self ) -> Optional[Any]:
snake_case__ , snake_case__ =UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=_UpperCAmelCase )
model.to(_UpperCAmelCase )
snake_case__ =model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def _lowercase ( self ) -> Optional[Any]:
        # by default, model loading will use accelerate as `low_cpu_mem_usage=True`
snake_case__ , snake_case__ =UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=_UpperCAmelCase )
model_accelerate.to(_UpperCAmelCase )
model_accelerate.eval()
snake_case__ =torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case__ =noise.to(_UpperCAmelCase )
snake_case__ =torch.tensor([10] * noise.shape[0] ).to(_UpperCAmelCase )
snake_case__ =model_accelerate(_UpperCAmelCase , _UpperCAmelCase )['sample']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
snake_case__ , snake_case__ =UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=_UpperCAmelCase , low_cpu_mem_usage=_UpperCAmelCase )
model_normal_load.to(_UpperCAmelCase )
model_normal_load.eval()
snake_case__ =model_normal_load(_UpperCAmelCase , _UpperCAmelCase )['sample']
assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-3 )
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(_UpperCAmelCase )
snake_case__ =torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case__ =noise.to(_UpperCAmelCase )
snake_case__ =torch.tensor([10] * noise.shape[0] ).to(_UpperCAmelCase )
with torch.no_grad():
snake_case__ =model(_UpperCAmelCase , _UpperCAmelCase ).sample
snake_case__ =output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case__ =torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-3 ) )
class a__( snake_case__ , snake_case__ , unittest.TestCase ):
a_ : List[str] = UNetaDModel
a_ : Optional[int] = '''sample'''
@property
def _lowercase ( self , _UpperCAmelCase=(32, 32) ) -> Tuple:
snake_case__ =4
snake_case__ =3
snake_case__ =floats_tensor((batch_size, num_channels) + sizes ).to(_UpperCAmelCase )
snake_case__ =torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def _lowercase ( self ) -> Union[str, Any]:
return (3, 32, 32)
@property
def _lowercase ( self ) -> Optional[Any]:
return (3, 32, 32)
def _lowercase ( self ) -> str:
snake_case__ ={
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1E-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
snake_case__ =self.dummy_input
return init_dict, inputs_dict
@slow
def _lowercase ( self ) -> List[Any]:
snake_case__ , snake_case__ =UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(_UpperCAmelCase )
snake_case__ =self.dummy_input
snake_case__ =floats_tensor((4, 3) + (256, 256) ).to(_UpperCAmelCase )
snake_case__ =noise
snake_case__ =model(**_UpperCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def _lowercase ( self ) -> Union[str, Any]:
snake_case__ =UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(_UpperCAmelCase )
snake_case__ =4
snake_case__ =3
snake_case__ =(256, 256)
snake_case__ =torch.ones((batch_size, num_channels) + sizes ).to(_UpperCAmelCase )
snake_case__ =torch.tensor(batch_size * [1E-4] ).to(_UpperCAmelCase )
with torch.no_grad():
snake_case__ =model(_UpperCAmelCase , _UpperCAmelCase ).sample
snake_case__ =output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case__ =torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-2 ) )
def _lowercase ( self ) -> List[Any]:
snake_case__ =UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(_UpperCAmelCase )
snake_case__ =4
snake_case__ =3
snake_case__ =(32, 32)
snake_case__ =torch.ones((batch_size, num_channels) + sizes ).to(_UpperCAmelCase )
snake_case__ =torch.tensor(batch_size * [1E-4] ).to(_UpperCAmelCase )
with torch.no_grad():
snake_case__ =model(_UpperCAmelCase , _UpperCAmelCase ).sample
snake_case__ =output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case__ =torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-2 ) )
def _lowercase ( self ) -> Optional[Any]:
# not required for this model
pass
| 538 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 140 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def lowercase ( a , a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :int = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
SCREAMING_SNAKE_CASE_ :Optional[Any] = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(a ):
os.makedirs(a )
SCREAMING_SNAKE_CASE_ :List[str] = model.state_dict()
def to_tf_var_name(a ):
for patt, repl in iter(a ):
SCREAMING_SNAKE_CASE_ :Optional[Any] = name.replace(a , a )
return F"bert/{name}"
def create_tf_var(a , a , a ):
SCREAMING_SNAKE_CASE_ :int = tf.dtypes.as_dtype(tensor.dtype )
SCREAMING_SNAKE_CASE_ :Tuple = tf.get_variable(dtype=a , shape=tensor.shape , name=a , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(a )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
SCREAMING_SNAKE_CASE_ :Any = to_tf_var_name(a )
SCREAMING_SNAKE_CASE_ :List[str] = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
SCREAMING_SNAKE_CASE_ :Tuple = torch_tensor.T
SCREAMING_SNAKE_CASE_ :List[Any] = create_tf_var(tensor=a , name=a , session=a )
tf.keras.backend.set_value(a , a )
SCREAMING_SNAKE_CASE_ :Tuple = session.run(a )
print(F"Successfully created {tf_name}: {np.allclose(a , a )}" )
SCREAMING_SNAKE_CASE_ :List[Any] = tf.train.Saver(tf.trainable_variables() )
saver.save(a , os.path.join(a , model_name.replace("-" , "_" ) + ".ckpt" ) )
def lowercase ( a=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Tuple = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=a , required=a , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=a , default=a , required=a , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=a , required=a , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=a , required=a , help="Directory in which to save tensorflow model" )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = parser.parse_args(a )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=a , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
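# Hedged usage sketch (the script filename is hypothetical; the flags come from the
# argparse definitions above):
#   python convert_bert_pytorch_checkpoint_to_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt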
| 140 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _snake_case ( A__ ):
_lowercase : int = '''naver-clova-ix/donut-base-finetuned-docvqa'''
_lowercase : List[Any] = (
'''This is a tool that answers a question about an document (pdf). It takes an input named `document` which '''
'''should be the document containing the information, as well as a `question` that is the question about the '''
'''document. It returns a text that contains the answer to the question.'''
)
_lowercase : Optional[Any] = '''document_qa'''
_lowercase : Tuple = AutoProcessor
_lowercase : Tuple = VisionEncoderDecoderModel
_lowercase : str = ['''image''', '''text''']
_lowercase : List[str] = ['''text''']
def __init__( self , *a , **a) -> List[Any]:
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.')
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Optional[int]:
SCREAMING_SNAKE_CASE = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
SCREAMING_SNAKE_CASE = task_prompt.replace('{user_input}' , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = self.pre_processor.tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors='pt').input_ids
SCREAMING_SNAKE_CASE = self.pre_processor(_UpperCAmelCase , return_tensors='pt').pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def SCREAMING_SNAKE_CASE__ ( self , a) -> int:
return self.model.generate(
inputs['pixel_values'].to(self.device) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_UpperCAmelCase , ).sequences
def SCREAMING_SNAKE_CASE__ ( self , a) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.pre_processor.batch_decode(_UpperCAmelCase)[0]
SCREAMING_SNAKE_CASE = sequence.replace(self.pre_processor.tokenizer.eos_token , '')
SCREAMING_SNAKE_CASE = sequence.replace(self.pre_processor.tokenizer.pad_token , '')
SCREAMING_SNAKE_CASE = re.sub(R'<.*?>' , '' , _UpperCAmelCase , count=1).strip() # remove first task start token
SCREAMING_SNAKE_CASE = self.pre_processor.tokenajson(_UpperCAmelCase)
return sequence["answer"]
| 73 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = 42
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
super().__init__()
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
@torch.no_grad()
def __call__( self , _UpperCAmelCase = 1 , _UpperCAmelCase = 2_000 , _UpperCAmelCase = None , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , **_UpperCAmelCase , ):
__snake_case : Union[str, Any] = self.unet.config.sample_size
__snake_case : Optional[int] = (batch_size, 3, img_size, img_size)
__snake_case : Union[str, Any] = self.unet
__snake_case : List[Any] = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase ) * self.scheduler.init_noise_sigma
__snake_case : str = sample.to(self.device )
self.scheduler.set_timesteps(_UpperCAmelCase )
self.scheduler.set_sigmas(_UpperCAmelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__snake_case : Dict = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__snake_case : Dict = self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample
__snake_case : Optional[int] = self.scheduler.step_correct(_UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# prediction step
__snake_case : Dict = model(_UpperCAmelCase , _UpperCAmelCase ).sample
__snake_case : Any = self.scheduler.step_pred(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase )
__snake_case , __snake_case : Union[str, Any] = output.prev_sample, output.prev_sample_mean
__snake_case : Union[str, Any] = sample_mean.clamp(0 , 1 )
__snake_case : str = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case : List[str] = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=_UpperCAmelCase )
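# Hedged note: the sampling loop above is predictor-corrector SDE sampling. At each
# sigma level it first applies `correct_steps` Langevin-style corrector updates via
# `scheduler.step_correct`, then takes one reverse-diffusion predictor step via
# `scheduler.step_pred`.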
| 576 | 0 |
'''simple docstring'''
def _a ( _lowercase : int ):
'''simple docstring'''
__UpperCAmelCase : str = generate_pascal_triangle(_lowercase )
for row_idx in range(_lowercase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=''' ''' )
else:
print(triangle[row_idx][col_idx] , end='''''' )
print()
def _a ( _lowercase : int ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
__UpperCAmelCase : list[list[int]] = []
for current_row_idx in range(_lowercase ):
__UpperCAmelCase : Optional[Any] = populate_current_row(_lowercase , _lowercase )
triangle.append(_lowercase )
return triangle
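# Hedged worked example of the intended behavior (comments only):
#   generate_pascal_triangle(4) -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
# where each row is produced by populate_current_row from the row above it.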
def _a ( _lowercase : list[list[int]] , _lowercase : int ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = 1, 1
for current_col_idx in range(1 , _lowercase ):
calculate_current_element(
_lowercase , _lowercase , _lowercase , _lowercase )
return current_row
def _a ( _lowercase : list[list[int]] , _lowercase : list[int] , _lowercase : int , _lowercase : int , ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = triangle[current_row_idx - 1][current_col_idx - 1]
__UpperCAmelCase : List[str] = triangle[current_row_idx - 1][current_col_idx]
__UpperCAmelCase : int = above_to_left_elt + above_to_right_elt
def _a ( _lowercase : int ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
__UpperCAmelCase : list[list[int]] = [[1]]
for row_index in range(1 , _lowercase ):
__UpperCAmelCase : List[str] = [0] + result[-1] + [0]
__UpperCAmelCase : Union[str, Any] = row_index + 1
# Calculate the number of distinct elements in a row
__UpperCAmelCase : List[Any] = sum(divmod(_lowercase , 2 ) )
__UpperCAmelCase : Optional[int] = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
__UpperCAmelCase : Optional[Any] = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
__UpperCAmelCase : Dict = row_first_half + row_second_half
result.append(_lowercase )
return result
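# Hedged note: the optimized variant above exploits the symmetry of Pascal rows. It
# computes only the first ceil((row_index + 1) / 2) entries of each padded row and
# mirrors them, roughly halving the number of additions per row.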
def _a ( ):
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_lowercase : Callable , _lowercase : int ) -> None:
__UpperCAmelCase : str = F'{func.__name__}({value})'
__UpperCAmelCase : int = timeit(F'__main__.{call}' , setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'{call:38} -- {timing:.4f} seconds' )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(_lowercase , _lowercase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
    benchmark()
| 266 |
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
__UpperCAmelCase :Optional[Any] = True
from torch.cuda.amp import autocast
__UpperCAmelCase :Any = logging.getLogger(__name__)
def _a ( _lowercase : List[str]=None , _lowercase : Union[str, Any]=None ):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=_lowercase )
@dataclass
class a :
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
SCREAMING_SNAKE_CASE : Optional[bool] = field(
default=_a , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.1 , metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
} , )
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.1 , metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."} , )
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.05 , metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
} , )
SCREAMING_SNAKE_CASE : Optional[float] = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class a :
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_a , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default="train+validation" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
SCREAMING_SNAKE_CASE : bool = field(
default=_a , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
SCREAMING_SNAKE_CASE : Optional[int] = field(
default=_a , metadata={"help": "The number of processes to use for the preprocessing."} , )
SCREAMING_SNAKE_CASE : Optional[int] = field(
default=_a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE : Optional[int] = field(
default=_a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE : List[str] = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class a :
"""simple docstring"""
SCREAMING_SNAKE_CASE : WavaVecaProcessor
SCREAMING_SNAKE_CASE : Union[bool, str] = True
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
def __call__( self : Any , snake_case : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
# different padding methods
__UpperCAmelCase : List[Any] = [{'''input_values''': feature['''input_values''']} for feature in features]
__UpperCAmelCase : List[str] = [{'''input_ids''': feature['''labels''']} for feature in features]
__UpperCAmelCase : List[Any] = self.processor.pad(
snake_case , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
__UpperCAmelCase : Union[str, Any] = self.processor.pad(
labels=snake_case , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
__UpperCAmelCase : List[str] = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
__UpperCAmelCase : str = labels
return batch
class a ( _a ):
"""simple docstring"""
def lowerCamelCase__ ( self : Optional[Any] , snake_case : nn.Module , snake_case : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
model.train()
__UpperCAmelCase : Any = self._prepare_inputs(snake_case )
if self.use_amp:
with autocast():
__UpperCAmelCase : Optional[Any] = self.compute_loss(snake_case , snake_case )
else:
__UpperCAmelCase : int = self.compute_loss(snake_case , snake_case )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
__UpperCAmelCase : Any = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__UpperCAmelCase : Tuple = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(f'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
if self.args.gradient_accumulation_steps > 1:
__UpperCAmelCase : Optional[int] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(snake_case ).backward()
elif self.use_apex:
with amp.scale_loss(snake_case , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(snake_case )
else:
loss.backward()
return loss.detach()
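# Hedged note: the branches in `training_step` above mirror the stock Trainer. AMP
# scales the loss before backward, Apex wraps it in amp.scale_loss, DeepSpeed
# delegates backward to its engine, and plain fp32 falls through to loss.backward().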
def _a ( ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__UpperCAmelCase : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCAmelCase : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , _lowercase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__UpperCAmelCase : str = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
__UpperCAmelCase : Union[str, Any] = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
__UpperCAmelCase : Dict = F'[{"".join(data_args.chars_to_ignore )}]'
def remove_special_characters(_lowercase : Any ):
__UpperCAmelCase : str = re.sub(_lowercase , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
__UpperCAmelCase : Dict = train_dataset.map(_lowercase , remove_columns=['''sentence'''] )
__UpperCAmelCase : Optional[Any] = eval_dataset.map(_lowercase , remove_columns=['''sentence'''] )
def extract_all_chars(_lowercase : List[Any] ):
__UpperCAmelCase : Tuple = ''' '''.join(batch['''text'''] )
__UpperCAmelCase : List[Any] = list(set(_lowercase ) )
return {"vocab": [vocab], "all_text": [all_text]}
__UpperCAmelCase : Dict = train_dataset.map(
_lowercase , batched=_lowercase , batch_size=-1 , keep_in_memory=_lowercase , remove_columns=train_dataset.column_names , )
__UpperCAmelCase : Tuple = train_dataset.map(
_lowercase , batched=_lowercase , batch_size=-1 , keep_in_memory=_lowercase , remove_columns=eval_dataset.column_names , )
__UpperCAmelCase : Any = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
__UpperCAmelCase : List[str] = {v: k for k, v in enumerate(_lowercase )}
__UpperCAmelCase : str = vocab_dict[''' ''']
del vocab_dict[" "]
__UpperCAmelCase : Dict = len(_lowercase )
__UpperCAmelCase : Optional[Any] = len(_lowercase )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(_lowercase , _lowercase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase : Optional[Any] = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
__UpperCAmelCase : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=_lowercase , return_attention_mask=_lowercase )
__UpperCAmelCase : Dict = WavaVecaProcessor(feature_extractor=_lowercase , tokenizer=_lowercase )
__UpperCAmelCase : Tuple = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
__UpperCAmelCase : Any = min(len(_lowercase ) , data_args.max_train_samples )
__UpperCAmelCase : Any = train_dataset.select(range(_lowercase ) )
if data_args.max_val_samples is not None:
__UpperCAmelCase : Optional[int] = eval_dataset.select(range(data_args.max_val_samples ) )
__UpperCAmelCase : List[Any] = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(_lowercase : List[Any] ):
__UpperCAmelCase , __UpperCAmelCase : int = torchaudio.load(batch['''path'''] )
__UpperCAmelCase : Optional[int] = resampler(_lowercase ).squeeze().numpy()
__UpperCAmelCase : Optional[int] = 16000
__UpperCAmelCase : Union[str, Any] = batch['''text''']
return batch
__UpperCAmelCase : int = train_dataset.map(
_lowercase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
__UpperCAmelCase : Any = eval_dataset.map(
_lowercase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(_lowercase : Tuple ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), F'Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'
__UpperCAmelCase : int = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(_lowercase )
return batch
__UpperCAmelCase : int = train_dataset.map(
_lowercase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , )
__UpperCAmelCase : Tuple = eval_dataset.map(
_lowercase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , )
# Metric
__UpperCAmelCase : List[Any] = datasets.load_metric('''wer''' )
def compute_metrics(_lowercase : Tuple ):
__UpperCAmelCase : Optional[int] = pred.predictions
__UpperCAmelCase : int = np.argmax(_lowercase , axis=-1 )
__UpperCAmelCase : str = processor.tokenizer.pad_token_id
__UpperCAmelCase : Union[str, Any] = processor.batch_decode(_lowercase )
# we do not want to group tokens when computing the metrics
__UpperCAmelCase : Tuple = processor.batch_decode(pred.label_ids , group_tokens=_lowercase )
__UpperCAmelCase : Optional[Any] = wer_metric.compute(predictions=_lowercase , references=_lowercase )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__UpperCAmelCase : List[str] = DataCollatorCTCWithPadding(processor=_lowercase , padding=_lowercase )
# Initialize our Trainer
__UpperCAmelCase : List[str] = CTCTrainer(
model=_lowercase , data_collator=_lowercase , args=_lowercase , compute_metrics=_lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__UpperCAmelCase : Optional[int] = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__UpperCAmelCase : List[Any] = model_args.model_name_or_path
else:
__UpperCAmelCase : Optional[int] = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__UpperCAmelCase : List[Any] = trainer.train(resume_from_checkpoint=_lowercase )
trainer.save_model()
__UpperCAmelCase : Tuple = train_result.metrics
__UpperCAmelCase : Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase )
)
__UpperCAmelCase : Tuple = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''train''' , _lowercase )
trainer.save_metrics('''train''' , _lowercase )
trainer.save_state()
# Evaluation
__UpperCAmelCase : Dict = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__UpperCAmelCase : Tuple = trainer.evaluate()
__UpperCAmelCase : List[Any] = data_args.max_val_samples if data_args.max_val_samples is not None else len(_lowercase )
__UpperCAmelCase : List[str] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''eval''' , _lowercase )
trainer.save_metrics('''eval''' , _lowercase )
return results
if __name__ == "__main__":
    main()
| 266 | 1 |
"""simple docstring"""
def __snake_case ( __A ) -> Tuple:
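    # Kahn's algorithm: repeatedly pop a vertex with in-degree 0, append it to the
    # ordering, and decrement the in-degree of its successors; if fewer than
    # len(graph) vertices get popped, the graph contains a cycle.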
lowercase : List[str] = [0] * len(__A )
lowercase : str = []
lowercase : Optional[int] = []
lowercase : List[str] = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__A ) ):
if indegree[i] == 0:
queue.append(__A )
while queue:
lowercase : List[Any] = queue.pop(0 )
cnt += 1
topo.append(__A )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(__A )
if cnt != len(__A ):
print("""Cycle exists""" )
else:
print(__A )
# Adjacency List of Graph
lowerCAmelCase: Optional[Any] ={0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 607 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowerCamelCase__ ( __UpperCamelCase , unittest.TestCase ):
__UpperCAmelCase = MvpTokenizer
__UpperCAmelCase = MvpTokenizerFast
__UpperCAmelCase = True
__UpperCAmelCase = filter_roberta_detectors
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowercase : Optional[Any] = dict(zip(snake_case , range(len(snake_case ) ) ) )
lowercase : Dict = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowercase : int = {"""unk_token""": """<unk>"""}
lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(snake_case ) )
def _UpperCAmelCase ( self , **snake_case ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
def _UpperCAmelCase ( self , **snake_case ) -> Dict:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
def _UpperCAmelCase ( self , snake_case ) -> Optional[int]:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" )
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
lowercase : str = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowercase : Dict = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : Union[str, Any] = tokenizer(snake_case , max_length=len(snake_case ) , padding=snake_case , return_tensors="""pt""" )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowercase : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case , snake_case )
# Test that special tokens are reset
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
lowercase : List[str] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : Any = tokenizer(snake_case , padding=snake_case , return_tensors="""pt""" )
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , snake_case )
self.assertIn("""attention_mask""" , snake_case )
self.assertNotIn("""labels""" , snake_case )
self.assertNotIn("""decoder_attention_mask""" , snake_case )
@require_torch
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : int = tokenizer(text_target=snake_case , max_length=3_2 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(3_2 , targets["""input_ids"""].shape[1] )
@require_torch
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : str = tokenizer(
["""I am a small frog""" * 1_0_2_4, """I am a small frog"""] , padding=snake_case , truncation=snake_case , return_tensors="""pt""" )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual(batch.input_ids.shape , (2, 1_0_2_4) )
@require_torch
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
lowercase : List[Any] = ["""A long paragraph for summarization."""]
lowercase : List[str] = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase : List[str] = tokenizer(snake_case , text_target=snake_case , return_tensors="""pt""" )
lowercase : Union[str, Any] = inputs["""input_ids"""]
lowercase : Optional[Any] = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    def test_pretokenized_inputs ( self ):
"""simple docstring"""
pass
    def test_embeded_special_tokens ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
lowercase : Optional[Any] = self.tokenizer_class.from_pretrained(snake_case , **snake_case )
lowercase : str = """A, <mask> AllenNLP sentence."""
lowercase : int = tokenizer_r.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
lowercase : str = tokenizer_p.encode_plus(snake_case , add_special_tokens=snake_case , return_token_type_ids=snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
lowercase : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
lowercase : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
snake_case , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
snake_case , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 607 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
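# Usage sketch (mine, not part of the original file; it assumes this is the CLIPSeg
# processor, so the checkpoint name and image path below are illustrative only):
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# inputs = processor(text=["a cat"], images=Image.open("cat.png"), return_tensors="pt")
# "inputs" then carries input_ids, attention_mask and pixel_values for the model.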
| 547 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class TvltFeatureExtractionTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , spectrogram_length=2048 , feature_size=128 , num_audio_channels=1 , hop_length=512 , chunk_length=30 , sampling_rate=44100 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict ( self ):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }
    def prepare_inputs_for_common ( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = TvltFeatureExtractor
    def setUp ( self ):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties ( self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained ( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            mel_first = dict_first.pop("mel_filters")
            mel_second = dict_second.pop("mel_filters")
            self.assertTrue(np.allclose(mel_first, mel_second))
            self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file ( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            mel_first = dict_first.pop("mel_filters")
            mel_second = dict_second.pop("mel_filters")
            self.assertTrue(np.allclose(mel_first, mel_second))
            self.assertEqual(dict_first, dict_second)
    def test_call ( self ):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples ( self , num_samples ):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration ( self ):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
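# Standalone sketch (mine, not from the test file): feeding one second of random
# audio through the extractor; the array shape and dtype below are assumptions.
# feature_extractor = TvltFeatureExtractor()
# audio = [np.random.rand(44100).astype(np.float64)]
# audio_values = feature_extractor(audio, return_tensors="np", sampling_rate=44100).audio_values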
| 547 | 1 |
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr
def stooge(arr, i, h):
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, (h))
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))
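# Quick sanity checks (example values are my own, not from the source):
assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]
assert stooge_sort([]) == []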
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 81 |
def is_balanced(s):
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0
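# Sanity checks (my own examples, not from the source):
assert is_balanced("([]{})")
assert not is_balanced("([)]")
assert not is_balanced("(")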
def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
    main()
| 402 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
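# Illustrative note (mine, not from the file): once the lazy module is installed in
# sys.modules, backend-specific code is only imported on first attribute access:
# from transformers.models import roberta
# config_cls = roberta.RobertaConfig  # this line triggers the real import of configuration_roberta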
| 488 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=256 , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.", FutureWarning, )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation(self):
        return 1e-4
    @property
    def default_onnx_opset(self):
        return 12
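# Minimal usage sketch (mine, not from the file): building a config and inspecting
# the ONNX export metadata defined above.
# config = SegformerConfig(depths=[2, 2, 2, 2])
# onnx_config = SegformerOnnxConfig(config)
# print(onnx_config.inputs)             # pixel_values with dynamic batch/channel/height/width axes
# print(onnx_config.default_onnx_opset) # 12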
| 488 | 1 |
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Returns the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12))
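# Sanity check against the known published answer to Project Euler problem 8
# (13-digit window over the 1000-digit number above):
assert solution() == 23514624000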
if __name__ == "__main__":
print(F'''{solution() = }''')
| 243 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 713 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
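# Example invocation (script and file names below are placeholders, not taken
# from the original repository):
# python convert_reformer_trax_checkpoint_to_pytorch.py \
#     --trax_model_pkl_path /path/to/model.pkl \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin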
| 578 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"
    def __init__( self , image_size=224 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation(self):
        return 1e-4
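# Minimal usage sketch (mine, not from the file): the defaults above describe the
# LeViT-128S layout.
# config = LevitConfig()
# print(config.hidden_sizes)                       # [128, 256, 384]
# print(LevitOnnxConfig(config).atol_for_validation)  # 1e-4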
| 394 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None
    @staticmethod
    def is_available():
        raise NotImplementedError
    def run(self, trainer, n_trials, direction, **kwargs):
        raise NotImplementedError
    def default_hp_space(self, trial):
        raise NotImplementedError
    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
    @classmethod
    def pip_install(cls):
        return f"""`pip install {cls.pip_package or cls.name}`"""
class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"
    @staticmethod
    def is_available():
        return is_optuna_available()
    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)
class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"
    @staticmethod
    def is_available():
        return is_ray_available()
    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)
class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"
    @staticmethod
    def is_available():
        return is_sigopt_available()
    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"
    @staticmethod
    def is_available():
        return is_wandb_available()
    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"""{len(available_backends)} hyperparameter search backends available. Using {name} as the default.""")
        return name
    raise RuntimeError(
        'No hyperparameter search backend available.\n'
        + '\n'.join(
            f""" - To install {backend.name} run {backend.pip_install()}"""
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
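# Usage sketch (mine, not from the file): resolve the first installed backend and
# check that it is actually usable before launching a search.
# backend_name = default_hp_search_backend()  # e.g. "optuna" when optuna is installed
# backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
# backend.ensure_available()  # raises with a pip hint if the package is missing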
| 517 | 0 |
'''simple docstring'''
MORSE_CODE_DICT = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
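# Round-trip example (my own, not from the source); encrypt upper-cases its input:
assert encrypt("Sos") == "... --- ..."
assert decrypt("... --- ...") == "SOS"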
if __name__ == "__main__":
    main()
| 704 |
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(expected) == sorted(result)
| 512 | 0 |
def print_max_activities(start, finish):
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
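# For the demo data below the greedy choice selects activities 0, 1, 3 and 4,
# so the expected output is "0,1,3,4," (derived by hand, not from the source).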
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 54 |
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Returns (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Returns x such that x = r1 (mod n1) and x = r2 (mod n2)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    """Returns b such that a*b = 1 (mod n)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as above, computed with modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
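# Worked example (mine): x = 1 (mod 5) and x = 3 (mod 7) gives x = 31.
assert chinese_remainder_theorem(5, 1, 7, 3) == 31
assert chinese_remainder_theorem2(5, 1, 7, 3) == 31
assert invert_modulo(5, 7) == 3  # since 5 * 3 = 15 = 1 (mod 7)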
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 54 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class MvpTokenizationTest ( TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp ( self ):
"""simple docstring"""
super().setUp()
_lowerCamelCase : List[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_lowerCamelCase : str = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_lowerCamelCase : Optional[int] = {'''unk_token''': '''<unk>'''}
_lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
    def get_tokenizer ( self , **__lowerCAmelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
    def get_rust_tokenizer ( self , **__lowerCAmelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
    def get_input_output_texts ( self , __lowerCAmelCase ):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
    def default_tokenizer ( self ):
"""simple docstring"""
return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )
@cached_property
    def default_tokenizer_fast ( self ):
"""simple docstring"""
return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )
@require_torch
    def test_prepare_batch ( self ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_lowerCamelCase : Dict = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : List[str] = tokenizer(__lowerCAmelCase , max_length=len(__lowerCAmelCase ) , padding=__lowerCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_lowerCamelCase : int = batch.input_ids.tolist()[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Test that special tokens are reset
@require_torch
    def test_prepare_batch_empty_target_text ( self ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : int = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors='''pt''' )
# check if input_ids are returned and no labels
self.assertIn('''input_ids''' , __lowerCAmelCase )
self.assertIn('''attention_mask''' , __lowerCAmelCase )
self.assertNotIn('''labels''' , __lowerCAmelCase )
self.assertNotIn('''decoder_attention_mask''' , __lowerCAmelCase )
@require_torch
    def test_tokenizer_as_target_length ( self ):
"""simple docstring"""
_lowerCamelCase : int = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : Optional[int] = tokenizer(text_target=__lowerCAmelCase , max_length=3_2 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
@require_torch
    def test_tokenizer_as_target_truncation ( self ):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : int = tokenizer(
['''I am a small frog''' * 1_0_2_4, '''I am a small frog'''] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(batch.input_ids.shape , (2, 1_0_2_4) )
@require_torch
    def test_tokenizer_as_target_text ( self ):
"""simple docstring"""
_lowerCamelCase : Any = ['''A long paragraph for summarization.''']
_lowerCamelCase : List[str] = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : Union[str, Any] = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase , return_tensors='''pt''' )
_lowerCamelCase : int = inputs['''input_ids''']
_lowerCamelCase : Optional[Any] = inputs['''labels''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    def test_pretokenized_inputs ( self ):
"""simple docstring"""
pass
    def test_embeded_special_tokens ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : str = '''A, <mask> AllenNLP sentence.'''
_lowerCamelCase : int = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_lowerCamelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_lowerCamelCase : str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__lowerCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__lowerCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 598 |
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , size=None , do_resize=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , num_labels=10 , do_reduce_labels=False , ignore_index=255 , repo_path="shi-labs/oneformer_demo" , class_info_file="ade20k_panoptic.json" , num_text=10 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values ( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
    def get_fake_oneformer_outputs ( self ):
"""simple docstring"""
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp ( self ):
        self.image_processing_tester = OneFormerImageProcessorTester(self)
@property
    def image_processor_dict ( self ):
"""simple docstring"""
return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties ( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
    def test_init_without_params ( self ):
"""simple docstring"""
pass
    def test_call_pil ( self ):
"""simple docstring"""
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_lowerCamelCase : Optional[int] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase : Any = self.image_processing_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase : Dict = self.image_processing_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
_lowerCamelCase : int = image_processor(
__lowerCAmelCase , ['''semantic'''] * len(__lowerCAmelCase ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : Any = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_lowerCamelCase : Optional[int] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.image_processing_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase : Tuple = self.image_processing_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
_lowerCamelCase : List[Any] = image_processor(
__lowerCAmelCase , ['''semantic'''] * len(__lowerCAmelCase ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_lowerCamelCase : Union[str, Any] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.image_processing_tester.get_expected_values(__lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.image_processing_tester.get_expected_values(__lowerCAmelCase , batched=__lowerCAmelCase )
_lowerCamelCase : int = image_processor(
__lowerCAmelCase , ['''semantic'''] * len(__lowerCAmelCase ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[Any]="np" ):
"""simple docstring"""
_lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_lowerCamelCase : Optional[Any] = self.image_processing_tester.num_labels
_lowerCamelCase : List[str] = None
_lowerCamelCase : Dict = None
_lowerCamelCase : Any = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowerCAmelCase )
if with_segmentation_maps:
_lowerCamelCase : int = num_labels
if is_instance_map:
_lowerCamelCase : List[Any] = list(range(__lowerCAmelCase ) ) * 2
_lowerCamelCase : Dict = dict(enumerate(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_lowerCamelCase : str = [Image.fromarray(__lowerCAmelCase ) for annotation in annotations]
_lowerCamelCase : List[str] = image_processor(
__lowerCAmelCase , ['''semantic'''] * len(__lowerCAmelCase ) , __lowerCAmelCase , return_tensors='''pt''' , instance_id_to_semantic_id=__lowerCAmelCase , pad_and_return_pixel_mask=__lowerCAmelCase , )
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
def common(__lowerCAmelCase : List[str]=False , __lowerCAmelCase : List[str]=None ):
_lowerCamelCase : Dict = self.comm_get_image_processor_inputs(
with_segmentation_maps=__lowerCAmelCase , is_instance_map=__lowerCAmelCase , segmentation_type=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = inputs['''mask_labels''']
_lowerCamelCase : Union[str, Any] = inputs['''class_labels''']
_lowerCamelCase : Dict = inputs['''pixel_values''']
_lowerCamelCase : int = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(__lowerCAmelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=__lowerCAmelCase )
common(is_instance_map=__lowerCAmelCase , segmentation_type='''pil''' )
common(is_instance_map=__lowerCAmelCase , segmentation_type='''pil''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = np.zeros((2_0, 5_0) )
_lowerCamelCase : Tuple = 1
_lowerCamelCase : Tuple = 1
_lowerCamelCase : Tuple = 1
_lowerCamelCase : int = binary_mask_to_rle(__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) , 4 )
self.assertEqual(rle[0] , 2_1 )
self.assertEqual(rle[1] , 4_5 )
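    # The assertions above pin down the run-length convention (assuming the
    # upstream COCO-style format): the RLE is a flat [start, length, start,
    # length, ...] list over the flattened mask with 1-indexed starts, so a
    # single run of 45 foreground pixels beginning at flattened position 21
    # explains rle[:2] == [21, 45], and len(rle) == 4 means exactly two runs.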
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
_lowerCamelCase : Any = self.image_processing_tester.get_fake_oneformer_outputs()
        _lowerCamelCase : int = image_processor.post_process_semantic_segmentation(__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_lowerCamelCase : Any = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _lowerCamelCase : Dict = image_processor.post_process_semantic_segmentation(__lowerCAmelCase , target_sizes=__lowerCAmelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
_lowerCamelCase : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_lowerCamelCase : Optional[Any] = image_processor.post_process_instance_segmentation(__lowerCAmelCase , threshold=0 )
self.assertTrue(len(__lowerCAmelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , __lowerCAmelCase )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Dict = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
_lowerCamelCase : Union[str, Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_lowerCamelCase : Optional[Any] = image_processor.post_process_panoptic_segmentation(__lowerCAmelCase , threshold=0 )
self.assertTrue(len(__lowerCAmelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , __lowerCAmelCase )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 598 | 1 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE__ : List[str] = """Hello world! cécé herlolip"""
SCREAMING_SNAKE_CASE__ : Any = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = BertAbsConfig(
temp_dir=""".""" , finetune_bert=__lowerCamelCase , large=__lowerCamelCase , share_emb=__lowerCamelCase , use_bert_emb=__lowerCamelCase , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
UpperCAmelCase__ : Any = torch.load(__lowerCamelCase , lambda __lowerCamelCase , __lowerCamelCase : storage )
UpperCAmelCase__ : int = AbsSummarizer(__lowerCamelCase , torch.device("""cpu""" ) , __lowerCamelCase )
original.eval()
UpperCAmelCase__ : Tuple = BertAbsSummarizer(__lowerCamelCase , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
UpperCAmelCase__ : Any = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
UpperCAmelCase__ : List[Any] = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__lowerCamelCase )) )
UpperCAmelCase__ : Any = torch.tensor(__lowerCamelCase ).unsqueeze(0 )
UpperCAmelCase__ : Tuple = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__lowerCamelCase )) )
UpperCAmelCase__ : Dict = torch.tensor(__lowerCamelCase ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
UpperCAmelCase__ : List[str] = encoder_input_ids
UpperCAmelCase__ : List[str] = decoder_input_ids
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Optional[int] = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process, we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
UpperCAmelCase__ : Any = original(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )[0]
UpperCAmelCase__ : List[str] = original.generator(__lowerCamelCase )
UpperCAmelCase__ : List[Any] = new_model(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )[0]
UpperCAmelCase__ : int = new_model.generator(__lowerCamelCase )
UpperCAmelCase__ : List[Any] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(__lowerCamelCase ) )
UpperCAmelCase__ : Any = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(__lowerCamelCase ) )
UpperCAmelCase__ : str = torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
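    # Assumed invocation (the script name and paths are placeholders, not
    # taken from this file):
    #   python convert_bertabs_original_pytorch_checkpoint.py \
    #       --bertabs_checkpoint_path /path/to/bertabs.pt \
    #       --pytorch_dump_folder_path ./bertabs-converted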
| 79 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
__magic_name__ : Dict = list[list[float | int]]
def A__ ( A_ , A_ ) -> Matrix:
_lowercase = len(A_ )
_lowercase = [[0 for _ in range(size + 1 )] for _ in range(A_ )]
_lowercase = 42
_lowercase = 42
_lowercase = 42
_lowercase = 42
_lowercase = 42
_lowercase = 42
for row in range(A_ ):
for col in range(A_ ):
_lowercase = matrix[row][col]
_lowercase = vector[row][0]
_lowercase = 0
_lowercase = 0
while row < size and col < size:
# pivoting
_lowercase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(A_ , A_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowercase , _lowercase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , A_ ):
_lowercase = augmented[rowa][col] / augmented[row][col]
_lowercase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , A_ ):
for row in range(A_ ):
_lowercase = augmented[row][col] / augmented[col][col]
for cola in range(A_ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(A_ )
]
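# Quick sanity check for the solver (a sketch, not part of the Project Euler
# solution itself): for 2x + y = 5 and x + 3y = 10,
#   solve([[2.0, 1.0], [1.0, 3.0]], [[5.0], [10.0]])  # -> [[1.0], [3.0]]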
def A__ ( A_ ) -> Callable[[int], int]:
_lowercase = len(A_ )
_lowercase = [[0 for _ in range(A_ )] for _ in range(A_ )]
_lowercase = [[0] for _ in range(A_ )]
_lowercase = 42
_lowercase = 42
_lowercase = 42
_lowercase = 42
for x_val, y_val in enumerate(A_ ):
for col in range(A_ ):
_lowercase = (x_val + 1) ** (size - col - 1)
_lowercase = y_val
_lowercase = solve(A_ , A_ )
def interpolated_func(A_ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(A_ ) )
return interpolated_func
def A__ ( A_ ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def A__ ( A_ = question_function , A_ = 10 ) -> int:
_lowercase = [func(A_ ) for x_val in range(1 , order + 1 )]
_lowercase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowercase = 0
_lowercase = 42
_lowercase = 42
for poly in polynomials:
_lowercase = 1
while func(A_ ) == poly(A_ ):
x_val += 1
ret += poly(A_ )
return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
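    # solution() sums the first incorrect terms (FITs) of each successive
    # polynomial fit of question_function, per Project Euler problem 101.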
| 497 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__A : str = logging.get_logger(__name__)
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
def __init__( self : str , *lowerCamelCase : int , **lowerCamelCase : Optional[int] ) -> None:
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , lowerCamelCase , )
super().__init__(*lowerCamelCase , **lowerCamelCase )
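# Usage note (a sketch): constructing the shim emits the deprecation warning
# and then behaves exactly like a YolosImageProcessor, so existing call sites
# keep working while they migrate to the new class name.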
| 398 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : str ) -> Any:
lowerCAmelCase_ : Union[str, Any] = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
lowerCAmelCase_, lowerCAmelCase_ : List[str] = get_aligned_output_features_output_indices(lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.assertEqual(lowerCamelCase , ["""c"""] )
self.assertEqual(lowerCamelCase , [2] )
# Out indices set to match out features
lowerCAmelCase_, lowerCAmelCase_ : List[str] = get_aligned_output_features_output_indices(["""a""", """c"""] , lowerCamelCase , lowerCamelCase )
self.assertEqual(lowerCamelCase , ["""a""", """c"""] )
self.assertEqual(lowerCamelCase , [0, 2] )
# Out features set to match out indices
lowerCAmelCase_, lowerCAmelCase_ : List[str] = get_aligned_output_features_output_indices(lowerCamelCase , [0, 2] , lowerCamelCase )
self.assertEqual(lowerCamelCase , ["""a""", """c"""] )
self.assertEqual(lowerCamelCase , [0, 2] )
# Out features selected from negative indices
lowerCAmelCase_, lowerCAmelCase_ : Any = get_aligned_output_features_output_indices(lowerCamelCase , [-3, -1] , lowerCamelCase )
self.assertEqual(lowerCamelCase , ["""a""", """c"""] )
self.assertEqual(lowerCamelCase , [-3, -1] )
def __lowercase ( self : List[str] ) -> Optional[int]:
# Stage names must be set
with self.assertRaises(lowerCamelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , lowerCamelCase )
# Out features must be a list
with self.assertRaises(lowerCamelCase ):
verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""] )
# Out features must be a subset of stage names
with self.assertRaises(lowerCamelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""] )
# Out indices must be a list or tuple
with self.assertRaises(lowerCamelCase ):
verify_out_features_out_indices(lowerCamelCase , 0 , ["""a""", """b"""] )
# Out indices must be a subset of stage names
with self.assertRaises(lowerCamelCase ):
verify_out_features_out_indices(lowerCamelCase , (0, 1) , ["""a"""] )
# Out features and out indices must be the same length
with self.assertRaises(lowerCamelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""] )
# Out features should match out indices
with self.assertRaises(lowerCamelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""] )
# Out features and out indices should be in order
with self.assertRaises(lowerCamelCase ):
verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""] )
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""] )
def __lowercase ( self : Dict ) -> Union[str, Any]:
lowerCAmelCase_ : List[str] = BackboneMixin()
lowerCAmelCase_ : List[str] = ["""a""", """b""", """c"""]
lowerCAmelCase_ : Tuple = ["""a""", """c"""]
lowerCAmelCase_ : Optional[Any] = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["""a""", """c"""] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
lowerCAmelCase_ : int = ["""a""", """b"""]
self.assertEqual(backbone.out_features , ["""a""", """b"""] )
self.assertEqual(backbone.out_indices , [0, 1] )
lowerCAmelCase_ : Dict = [-3, -1]
self.assertEqual(backbone.out_features , ["""a""", """c"""] )
self.assertEqual(backbone.out_indices , [-3, -1] )
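# The alignment rule the tests above pin down, as a plain sketch with
# stage_names = ["a", "b", "c"]:
#   get_aligned_output_features_output_indices(None, None, stage_names)
#   # -> (["c"], [2])          both default to the last stage
#   get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
#   # -> (["a", "c"], [0, 2])  out_indices is derived from out_features
#   get_aligned_output_features_output_indices(None, [0, 2], stage_names)
#   # -> (["a", "c"], [0, 2])  and vice versa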
| 398 | 1 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_A = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def lowercase (_snake_case ) -> Dict:
'''simple docstring'''
__UpperCamelCase = test_results.split(" " )
__UpperCamelCase = 0
__UpperCamelCase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__UpperCamelCase = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(_UpperCAmelCase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
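# Worked example: for a pytest summary such as "== 2 failed, 98 passed in
# 3.45s ==", the "failed," and "passed" tokens are preceded by their counts
# and the trailing "==" makes expressions[-2] the time, so
#   handle_test_results("== 2 failed, 98 passed in 3.45s ==")
# returns (2, 98, "3.45s").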
def lowercase (_snake_case ) -> Dict:
'''simple docstring'''
__UpperCamelCase = {}
__UpperCamelCase = None
__UpperCamelCase = False
for line in failures_short_lines.split("\n" ):
if re.search(r"_ \[doctest\]" ,_UpperCAmelCase ):
__UpperCamelCase = True
__UpperCamelCase = line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
__UpperCamelCase = line
__UpperCamelCase = False
return failures
class __UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , A_ : str , A_ : Dict )-> Dict:
__UpperCamelCase = title
__UpperCamelCase = doc_test_results["""time_spent"""].split("," )[0]
__UpperCamelCase = doc_test_results["""success"""]
__UpperCamelCase = doc_test_results["""failures"""]
__UpperCamelCase = self.n_success + self.n_failures
# Failures and success of the modeling tests
__UpperCamelCase = doc_test_results
@property
def A ( self : Union[str, Any] )-> str:
__UpperCamelCase = [self._time_spent]
__UpperCamelCase = 0
for time in time_spent:
__UpperCamelCase = time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(A_ ) == 1:
__UpperCamelCase = [0, 0, time_parts[0]]
__UpperCamelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
__UpperCamelCase = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f"""{int(A_ )}h{int(A_ )}m{int(A_ )}s"""
@property
def A ( self : Optional[int] )-> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def A ( self : Optional[Any] )-> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def A ( self : Union[str, Any] )-> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def A ( self : int )-> Dict:
__UpperCamelCase = 40
__UpperCamelCase = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(A_ , A_ )}
__UpperCamelCase = """"""
for category, failures in category_failures.items():
if len(A_ ) == 0:
continue
if report != "":
report += "\n\n"
report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(A_ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def A ( self : str )-> str:
__UpperCamelCase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(A_ )
@staticmethod
def A ( )-> int:
__UpperCamelCase = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(A_ )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=A_ , )
def A ( self : Optional[Any] )-> List[str]:
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
__UpperCamelCase = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else """All tests passed."""
__UpperCamelCase = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=A_ , )
def A ( self : int , A_ : Union[str, Any] , A_ : int , A_ : List[Any] , A_ : Any )-> Any:
__UpperCamelCase = """"""
for key, value in failures.items():
__UpperCamelCase = value[:2_00] + """ [Truncated]""" if len(A_ ) > 2_50 else value
failures_text += f"""*{key}*\n_{value}_\n\n"""
__UpperCamelCase = job_name
__UpperCamelCase = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
__UpperCamelCase = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def A ( self : List[str] )-> Any:
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
__UpperCamelCase = self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
__UpperCamelCase = sorted(self.doc_test_results.items() , key=lambda A_ : t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
__UpperCamelCase = f"""*Num failures* :{len(job_result["failed"] )} \n"""
__UpperCamelCase = job_result["""failures"""]
__UpperCamelCase = self.get_reply_blocks(A_ , A_ , A_ , text=A_ )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=f"""Results for {job}""" , blocks=A_ , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def lowercase () -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = os.environ["""GITHUB_RUN_ID"""]
__UpperCamelCase = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
__UpperCamelCase = requests.get(_UpperCAmelCase ).json()
__UpperCamelCase = {}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
__UpperCamelCase = math.ceil((result["total_count"] - 100) / 100 )
for i in range(_UpperCAmelCase ):
__UpperCamelCase = requests.get(url + f"""&page={i + 2}""" ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." ,_UpperCAmelCase )
return {}
def lowercase (_snake_case ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = {}
if os.path.exists(_UpperCAmelCase ):
__UpperCamelCase = os.listdir(_UpperCAmelCase )
for file in files:
try:
with open(os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) ,encoding="utf-8" ) as f:
__UpperCamelCase = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"""Could not open {os.path.join(_UpperCAmelCase ,_UpperCAmelCase )}.""" ) from e
return _artifact
def lowercase () -> str:
'''simple docstring'''
class __UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , A_ : str )-> List[Any]:
__UpperCamelCase = name
__UpperCamelCase = []
def __str__( self : Tuple )-> List[Any]:
return self.name
def A ( self : List[str] , A_ : str )-> List[str]:
self.paths.append({"name": self.name, "path": path} )
__UpperCamelCase = {}
__UpperCamelCase = filter(os.path.isdir ,os.listdir() )
for directory in directories:
__UpperCamelCase = directory
if artifact_name not in _available_artifacts:
__UpperCamelCase = Artifact(_UpperCAmelCase )
_available_artifacts[artifact_name].add_path(_UpperCAmelCase )
return _available_artifacts
if __name__ == "__main__":
_A = get_job_links()
_A = retrieve_available_artifacts()
_A = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_A = {
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_A = github_actions_job_links.get("run_doctests")
_A = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
_A = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_A = handle_test_results(artifact["stats"])
_A = failed
_A = success
_A = time_spent[1:-1] + """, """
_A = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_A = line.replace("FAILED ", "")
_A = line.split()[0].replace("\n", "")
if "::" in line:
_A = line.split("::")
else:
_A = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_A = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_A = all_failures[test] if test in all_failures else """N/A"""
_A = failure
break
_A = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
    message.post_reply()
 | 505 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
lowercase : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase : Any = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=8 ):
lowerCamelCase_: Dict = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
lowerCamelCase_: int = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
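# With the default movq scale factor of 8 this maps a requested pixel size to
# a latent size of ceil(side / 64) * 8, e.g.
#   get_new_h_w(768, 768, 8)  # -> (96, 96)
#   get_new_h_w(765, 768, 8)  # -> (96, 96): 765 is rounded up to a full block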
class a__ ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Any , A_ : MultilingualCLIP , A_ : XLMRobertaTokenizer , A_ : UNetaDConditionModel , A_ : Union[DDIMScheduler, DDPMScheduler] , A_ : VQModel , ) -> Tuple:
"""simple docstring"""
super().__init__()
self.register_modules(
text_encoder=A_ , tokenizer=A_ , unet=A_ , scheduler=A_ , movq=A_ , )
lowerCamelCase_: List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase ( self : Union[str, Any] , A_ : Dict , A_ : Any , A_ : Tuple , A_ : Tuple , A_ : Union[str, Any] , A_ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
if latents is None:
lowerCamelCase_: Optional[Any] = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowerCamelCase_: Dict = latents.to(A_ )
lowerCamelCase_: Dict = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase ( self : List[str] , A_ : Union[str, Any] , A_ : Optional[Any] , A_ : int , A_ : List[str] , A_ : Any=None , ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_: int = len(A_ ) if isinstance(A_ , A_ ) else 1
# get prompt text embeddings
lowerCamelCase_: Dict = self.tokenizer(
A_ , padding="""max_length""" , truncation=A_ , max_length=77 , return_attention_mask=A_ , add_special_tokens=A_ , return_tensors="""pt""" , )
lowerCamelCase_: Union[str, Any] = text_inputs.input_ids
lowerCamelCase_: List[Any] = self.tokenizer(A_ , padding="""longest""" , return_tensors="""pt""" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(A_ , A_ ):
lowerCamelCase_: Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowerCamelCase_: List[Any] = text_input_ids.to(A_ )
lowerCamelCase_: Dict = text_inputs.attention_mask.to(A_ )
lowerCamelCase_ , lowerCamelCase_: Dict = self.text_encoder(
input_ids=A_ , attention_mask=A_ )
lowerCamelCase_: Any = prompt_embeds.repeat_interleave(A_ , dim=0 )
lowerCamelCase_: Optional[Any] = text_encoder_hidden_states.repeat_interleave(A_ , dim=0 )
lowerCamelCase_: Tuple = text_mask.repeat_interleave(A_ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_: List[str]
if negative_prompt is None:
lowerCamelCase_: Dict = [""""""] * batch_size
elif type(A_ ) is not type(A_ ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(A_ )} !="""
f""" {type(A_ )}.""" )
elif isinstance(A_ , A_ ):
lowerCamelCase_: Optional[Any] = [negative_prompt]
elif batch_size != len(A_ ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(A_ )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
lowerCamelCase_: Tuple = negative_prompt
lowerCamelCase_: Tuple = self.tokenizer(
A_ , padding="""max_length""" , max_length=77 , truncation=A_ , return_attention_mask=A_ , add_special_tokens=A_ , return_tensors="""pt""" , )
lowerCamelCase_: Tuple = uncond_input.input_ids.to(A_ )
lowerCamelCase_: Union[str, Any] = uncond_input.attention_mask.to(A_ )
lowerCamelCase_ , lowerCamelCase_: int = self.text_encoder(
input_ids=A_ , attention_mask=A_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_: List[str] = negative_prompt_embeds.shape[1]
lowerCamelCase_: Any = negative_prompt_embeds.repeat(1 , A_ )
lowerCamelCase_: Tuple = negative_prompt_embeds.view(batch_size * num_images_per_prompt , A_ )
lowerCamelCase_: str = uncond_text_encoder_hidden_states.shape[1]
lowerCamelCase_: List[Any] = uncond_text_encoder_hidden_states.repeat(1 , A_ , 1 )
lowerCamelCase_: Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , A_ , -1 )
lowerCamelCase_: Tuple = uncond_text_mask.repeat_interleave(A_ , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_: List[Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowerCamelCase_: int = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowerCamelCase_: Union[str, Any] = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCAmelCase ( self : str , A_ : Dict=0 ) -> List[str]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
lowerCamelCase_: Any = torch.device(f"""cuda:{gpu_id}""" )
lowerCamelCase_: List[str] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A_ , A_ )
def lowerCAmelCase ( self : List[Any] , A_ : Union[str, Any]=0 ) -> Optional[Any]:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
lowerCamelCase_: Any = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=A_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase_: int = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowerCamelCase_ , lowerCamelCase_: Dict = cpu_offload_with_hook(A_ , A_ , prev_module_hook=A_ )
if self.safety_checker is not None:
lowerCamelCase_ , lowerCamelCase_: int = cpu_offload_with_hook(self.safety_checker , A_ , prev_module_hook=A_ )
# We'll offload the last model manually.
lowerCamelCase_: Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(A_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A_ )
def __call__( self : Optional[Any] , A_ : Union[str, List[str]] , A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A_ : Optional[Union[str, List[str]]] = None , A_ : int = 5_12 , A_ : int = 5_12 , A_ : int = 1_00 , A_ : float = 4.0 , A_ : int = 1 , A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[str] = "pil" , A_ : bool = True , ) -> Any:
"""simple docstring"""
if isinstance(A_ , A_ ):
lowerCamelCase_: List[str] = 1
elif isinstance(A_ , A_ ):
lowerCamelCase_: int = len(A_ )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(A_ )}""" )
lowerCamelCase_: int = self._execution_device
lowerCamelCase_: Optional[int] = batch_size * num_images_per_prompt
lowerCamelCase_: Optional[int] = guidance_scale > 1.0
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_: int = self._encode_prompt(
A_ , A_ , A_ , A_ , A_ )
if isinstance(A_ , A_ ):
lowerCamelCase_: int = torch.cat(A_ , dim=0 )
if isinstance(A_ , A_ ):
lowerCamelCase_: Tuple = torch.cat(A_ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_: List[str] = image_embeds.repeat_interleave(A_ , dim=0 )
lowerCamelCase_: Optional[int] = negative_image_embeds.repeat_interleave(A_ , dim=0 )
lowerCamelCase_: List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=A_ )
self.scheduler.set_timesteps(A_ , device=A_ )
lowerCamelCase_: List[str] = self.scheduler.timesteps
lowerCamelCase_: Union[str, Any] = self.unet.config.in_channels
lowerCamelCase_ , lowerCamelCase_: List[str] = get_new_h_w(A_ , A_ , self.movq_scale_factor )
# create initial latent
lowerCamelCase_: List[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , A_ , A_ , A_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_: List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_: Tuple = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds}
lowerCamelCase_: Optional[int] = self.unet(
sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0]
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_: Any = noise_pred.split(latents.shape[1] , dim=1 )
lowerCamelCase_ , lowerCamelCase_: Any = noise_pred.chunk(2 )
lowerCamelCase_ , lowerCamelCase_: str = variance_pred.chunk(2 )
lowerCamelCase_: List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCamelCase_: Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCamelCase_ , lowerCamelCase_: Tuple = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_: Optional[int] = self.scheduler.step(
A_ , A_ , A_ , generator=A_ , ).prev_sample
# post-processing
lowerCamelCase_: Dict = self.movq.decode(A_ , force_not_quantize=A_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowerCamelCase_: str = image * 0.5 + 0.5
lowerCamelCase_: str = image.clamp(0 , 1 )
lowerCamelCase_: Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase_: str = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
| 423 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
__lowerCAmelCase : Optional[Any] = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def a_ (_lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int )-> Union[str, Any]:
for attribute in key.split(""".""" ):
snake_case: List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
snake_case: Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
snake_case: Dict = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
snake_case: Union[str, Any] = value
elif weight_type == "weight_g":
snake_case: Dict = value
elif weight_type == "weight_v":
snake_case: Optional[int] = value
elif weight_type == "bias":
snake_case: Dict = value
else:
snake_case: List[str] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def a_ (_lowerCAmelCase : Optional[int] , _lowerCAmelCase : str )-> int:
snake_case: Union[str, Any] = []
snake_case: Union[str, Any] = fairseq_model.state_dict()
snake_case: str = hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case: List[str] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case: Any = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case: Union[str, Any] = True
if "*" in mapped_key:
snake_case: int = name.split(_lowerCAmelCase )[0].split(""".""" )[-2]
snake_case: Union[str, Any] = mapped_key.replace("""*""" , _lowerCAmelCase )
if "weight_g" in name:
snake_case: List[Any] = """weight_g"""
elif "weight_v" in name:
snake_case: str = """weight_v"""
elif "bias" in name and "relative_attention_bias" not in name:
snake_case: Dict = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case: Any = """weight"""
else:
snake_case: List[str] = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def a_ (_lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] )-> Union[str, Any]:
snake_case: Any = full_name.split("""conv_layers.""" )[-1]
snake_case: Optional[Any] = name.split(""".""" )
snake_case: Dict = int(items[0] )
snake_case: Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
snake_case: Dict = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
snake_case: str = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
snake_case: Union[str, Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
snake_case: Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def a_ (_lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str=None )-> Union[str, Any]:
# load the pre-trained checkpoints
snake_case: str = torch.load(_lowerCAmelCase )
snake_case: List[str] = WavLMConfigOrig(checkpoint["""cfg"""] )
snake_case: Any = WavLMOrig(_lowerCAmelCase )
model.load_state_dict(checkpoint["""model"""] )
model.eval()
if config_path is not None:
snake_case: Tuple = WavLMConfig.from_pretrained(_lowerCAmelCase )
else:
snake_case: Union[str, Any] = WavLMConfig()
snake_case: str = WavLMModel(_lowerCAmelCase )
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavlm.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
 | 701 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
__lowerCAmelCase : List[Any] = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
__lowerCAmelCase : str = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def a_ (_lowerCAmelCase : Optional[Any] )-> Optional[int]:
snake_case: Dict = (images / 2 + 0.5).clamp(0 , 1 )
snake_case: Optional[int] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
snake_case: int = numpy_to_pil(_lowerCAmelCase )
return images
def a_ (_lowerCAmelCase : Union[str, Any] )-> Dict:
if images.ndim == 3:
snake_case: List[Any] = images[None, ...]
snake_case: str = (images * 255).round().astype("""uint8""" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
snake_case: int = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images]
else:
snake_case: Dict = [Image.fromarray(_lowerCAmelCase ) for image in images]
return pil_images
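# End-to-end, the two helpers take a batch of decoder outputs in [-1, 1],
# denormalize to [0, 1], move to HWC numpy, scale to uint8 and wrap each
# frame as a PIL image; single-channel arrays take the grayscale ("L") path.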
| 164 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_ ( __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
# Initialise PyTorch model
_A = FunnelConfig.from_json_file(__UpperCamelCase )
print(F'Building PyTorch model from configuration: {config}' )
_A = FunnelBaseModel(__UpperCamelCase ) if base_model else FunnelModel(__UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , __UpperCamelCase )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
lowerCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
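    # Assumed invocation (file names are placeholders):
    #   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path model.ckpt --config_file config.json \
    #       --pytorch_dump_path pytorch_model.bin --base_model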
| 292 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase_ ( __UpperCamelCase : dict , __UpperCamelCase : str ) -> set[str]:
"""simple docstring"""
_A , _A = set(__UpperCamelCase ), [start]
while stack:
_A = stack.pop()
explored.add(__UpperCamelCase )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(__UpperCamelCase )
return explored
lowerCAmelCase = {
"""A""": ["""B""", """C""", """D"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F"""],
"""D""": ["""B""", """D"""],
"""E""": ["""B""", """F"""],
"""F""": ["""C""", """E""", """G"""],
"""G""": ["""F"""],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, """A"""))
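    # With the graph above and start "A", nodes are explored in the order
    # A, B, D, E, F, C, G: reversed() pushes neighbours so the left-most
    # adjacent vertex is popped (and explored) first.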
| 292 | 1 |
'''simple docstring'''
class _a :
"""simple docstring"""
def __init__( self : Optional[int] ):
'''simple docstring'''
lowercase_ = """"""
lowercase_ = """"""
lowercase_ = []
def lowerCamelCase__ ( self : List[str] , lowercase_ : int , lowercase_ : int ):
'''simple docstring'''
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
lowercase_ = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
lowercase_ = self.__min_dist_top_down_dp(lowercase_ , n - 1 )
lowercase_ = self.__min_dist_top_down_dp(m - 1 , lowercase_ )
lowercase_ = self.__min_dist_top_down_dp(m - 1 , n - 1 )
lowercase_ = 1 + min(lowercase_ , lowercase_ , lowercase_ )
return self.dp[m][n]
def lowerCamelCase__ ( self : Optional[Any] , lowercase_ : str , lowercase_ : str ):
'''simple docstring'''
lowercase_ = worda
lowercase_ = worda
lowercase_ = [[-1 for _ in range(len(lowercase_ ) )] for _ in range(len(lowercase_ ) )]
return self.__min_dist_top_down_dp(len(lowercase_ ) - 1 , len(lowercase_ ) - 1 )
def lowerCamelCase__ ( self : Tuple , lowercase_ : str , lowercase_ : str ):
'''simple docstring'''
lowercase_ = worda
lowercase_ = worda
lowercase_ = len(lowercase_ )
lowercase_ = len(lowercase_ )
lowercase_ = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
lowercase_ = j
elif j == 0: # second string is empty
lowercase_ = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
lowercase_ = self.dp[i - 1][j - 1]
else:
lowercase_ = self.dp[i][j - 1]
lowercase_ = self.dp[i - 1][j]
lowercase_ = self.dp[i - 1][j - 1]
lowercase_ = 1 + min(lowercase_ , lowercase_ , lowercase_ )
return self.dp[m][n]
if __name__ == "__main__":
__snake_case = EditDistance()
print("""****************** Testing Edit Distance DP Algorithm ******************""")
print()
__snake_case = input("""Enter the first string: """).strip()
__snake_case = input("""Enter the second string: """).strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
 | 717 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
    _import_structure["modeling_data2vec_text"] = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
    _import_structure["modeling_data2vec_vision"] = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
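# The block above registers a lazy module: heavy submodules are imported only
# when one of their names is first accessed. A minimal sketch of the same idea
# with PEP 562 module-level __getattr__ (illustrative only; the real
# _LazyModule in transformers also handles submodule access and import errors):
#
#     import importlib
#
#     _name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}
#
#     def __getattr__(name):
#         if name in _name_to_module:
#             module = importlib.import_module(f".{_name_to_module[name]}", __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")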
| 603 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
    labels: Optional[str] = field(
        default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
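    # Worked example for align_predictions (illustrative): with
    # label_map = {0: "O", 1: "B-PER"} and one sequence whose label_ids are
    # [-100, 0, 1, -100] (special tokens and sub-word pieces carry -100, the
    # CrossEntropyLoss ignore_index), only positions 1 and 2 survive, giving
    # out_label_list = [["O", "B-PER"]] and a preds_list of the same shape.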
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
 | 91 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlm-roberta-base''': 5_12,
'''xlm-roberta-large''': 5_12,
'''xlm-roberta-large-finetuned-conll02-dutch''': 5_12,
'''xlm-roberta-large-finetuned-conll02-spanish''': 5_12,
'''xlm-roberta-large-finetuned-conll03-english''': 5_12,
'''xlm-roberta-large-finetuned-conll03-german''': 5_12,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    """
    Adapted from RobertaTokenizer and XLNetTokenizer. Based on
    [SentencePiece](https://github.com/google/sentencepiece).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
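    # Worked example of the offset arithmetic above (illustrative): spm assigns
    # "," the piece id 3, and adding fairseq_offset (1) yields its fairseq id 4,
    # while ids 0-3 come from fairseq_tokens_to_ids and <mask> is appended last:
    #     _convert_token_to_id("<pad>")  -> 1
    #     _convert_token_to_id(",")      -> 3 + 1 = 4
    #     _convert_token_to_id("<mask>") -> len(sp_model) + 1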
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
 | 91 | 1 |
'''simple docstring'''
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config(self):
        return T5Config(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels=None, ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels=None, ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(self, config, input_dict, ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def lowerCAmelCase ( self : List[Any] )-> str:
snake_case = self.model_tester.prepare_config_and_inputs()
snake_case = UMTaModel(config_and_inputs[0] ).to(__snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__snake_case , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=__snake_case , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def lowerCAmelCase ( self : List[str] )-> List[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__snake_case )
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)

            out = model.generate(
                config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def lowerCAmelCase ( self : Optional[int] )-> Union[str, Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged")
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
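# The head-masking test above silences whole attention heads during generation.
# Sketch of the mask shapes involved (illustrative, not part of the test file):
#     head_mask            -> (num_layers, num_heads); 1.0 keeps a head, 0.0 zeroes it
#     decoder_head_mask    -> (num_decoder_layers, num_heads)
#     cross_attn_head_mask -> (num_decoder_layers, num_heads)
# With an all-zero mask, the summed attention weights of the masked module are
# exactly 0.0, which is what test_generate_with_head_masking asserts.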
| 517 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
_SCREAMING_SNAKE_CASE = "docs/source/en/_toctree.yml"
def __lowerCamelCase ( __lowerCAmelCase : Tuple ) -> Optional[int]:
snake_case = defaultdict(__lowerCAmelCase )
for doc in model_doc:
counts[doc["local"]] += 1
snake_case = [key for key, value in counts.items() if value > 1]
snake_case = []
for duplicate_key in duplicates:
snake_case = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key} )
if len(__lowerCAmelCase ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1] )
# Sort
return sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : s["title"].lower() )
def __lowerCamelCase ( __lowerCAmelCase : Any=False ) -> Optional[Any]:
with open(__lowerCAmelCase , encoding="""utf-8""" ) as f:
snake_case = yaml.safe_load(f.read() )
# Get to the API doc
snake_case = 0
while content[api_idx]["title"] != "API":
api_idx += 1
snake_case = content[api_idx]["""sections"""]
# Then to the model doc
snake_case = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
snake_case = api_doc[model_idx]["""sections"""]
snake_case = [(idx, section) for idx, section in enumerate(__lowerCAmelCase ) if """sections""" in section]
snake_case = False
for idx, modality_doc in modalities_docs:
snake_case = modality_doc["""sections"""]
snake_case = clean_model_doc_toc(__lowerCAmelCase )
if old_modality_doc != new_modality_doc:
snake_case = True
if overwrite:
snake_case = new_modality_doc
if diff:
if overwrite:
snake_case = model_doc
snake_case = api_doc
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(__lowerCAmelCase , allow_unicode=__lowerCAmelCase ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_SCREAMING_SNAKE_CASE = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
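# Example of what clean_model_doc_toc does (illustrative): given
#     [{"local": "model_doc/bert", "title": "BERT"},
#      {"local": "model_doc/albert", "title": "ALBERT"},
#      {"local": "model_doc/bert", "title": "BERT"}]
# it collapses the duplicate "model_doc/bert" entry and sorts by title:
#     [{"local": "model_doc/albert", "title": "ALBERT"},
#      {"local": "model_doc/bert", "title": "BERT"}]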
| 517 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    """
    Creates a beta schedule that discretizes the given alpha_t_bar function, which
    defines the cumulative product of (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
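# Worked example of the schedule above (illustrative): with the "cosine"
# transform alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2, each beta is
#     beta_i = min(1 - alpha_bar((i + 1) / N) / alpha_bar(i / N), max_beta)
# i.e. the per-step decay of the cumulative alpha product, capped at max_beta.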
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    Scheduler with Heun steps (Algorithm 2 from Karras et al. (2022)) for discrete beta schedules.
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, prediction_type: str = "epsilon", use_karras_sigmas: Optional[bool] = False, clip_sample: Optional[bool] = False, clip_sample_range: float = 1.0, timestep_spacing: str = "linspace", steps_offset: int = 0, ):
"""simple docstring"""
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor], ) -> torch.FloatTensor:
        # Scale the denoising model input by `1 / sqrt(sigma**2 + 1)` so it matches
        # the ODE formulation the algorithm expects.
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, num_train_timesteps: Optional[int] = None, ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: Union[float, torch.FloatTensor], sample: Union[torch.FloatTensor, np.ndarray], return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range)

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor, ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
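# Minimal usage sketch for the scheduler above (illustrative; `unet` and the
# initial latent `sample` are assumed to come from elsewhere):
#
#     scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(num_inference_steps=25, device="cuda")
#     sample = sample * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t).sample
#         sample = scheduler.step(noise_pred, t, sample).prev_sample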
| 611 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
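# Shape sketch for the tester above (illustrative): with image_size=30 and
# patch_size=2, num_patches = (30 // 2) ** 2 = 225; DeiT prepends both a [CLS]
# and a distillation token, so seq_length = 225 + 2 = 227.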
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(snake_case__ ),
*get_values(snake_case__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
__lowerCAmelCase = problem_type["title"]
__lowerCAmelCase = problem_type["num_labels"]
__lowerCAmelCase = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
__lowerCAmelCase = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if problem_type["num_labels"] > 1:
__lowerCAmelCase = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
__lowerCAmelCase = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
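# (a shape-[N, 1] input against a shape-[N] target is the classic trigger for this
# broadcasting warning in MSELoss, given as an illustrative example only)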
with warnings.catch_warnings(record=snake_case__ ) as warning_list:
__lowerCAmelCase = model(**snake_case__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = DeiTModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _UpperCAmelCase ( ):
"""simple docstring"""
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__lowerCAmelCase = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
snake_case__ )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**snake_case__ )
# verify the logits
__lowerCAmelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
__lowerCAmelCase = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCAmelCase = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=snake_case__ , return_tensors="pt" )
__lowerCAmelCase = inputs.pixel_values.to(snake_case__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__lowerCAmelCase = model(snake_case__ )
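# Note on the test above: loading the checkpoint in half precision with
# device_map="auto" relies on accelerate to place the weights, which is why
# this test is gated behind @require_accelerate and @require_torch_gpu.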
| 611 | 1 |
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE = """RegNetConfig"""
# Base docstring
SCREAMING_SNAKE_CASE = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE = [1, 1_088, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __a ( nn.Module ):
def __init__( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : Optional[str] = "relu" , )-> Any:
"""simple docstring"""
super().__init__()
UpperCamelCase = nn.Convad(
UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=UpperCAmelCase_ , stride=UpperCAmelCase_ , padding=kernel_size // 2 , groups=UpperCAmelCase_ , bias=UpperCAmelCase_ , )
UpperCamelCase = nn.BatchNormad(UpperCAmelCase_ )
UpperCamelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def _SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase_ : List[str] )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.convolution(UpperCAmelCase_ )
UpperCamelCase = self.normalization(UpperCAmelCase_ )
UpperCamelCase = self.activation(UpperCAmelCase_ )
return hidden_state
class __a ( nn.Module ):
def __init__( self : str , UpperCAmelCase_ : RegNetConfig )-> str:
"""simple docstring"""
super().__init__()
UpperCamelCase = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
UpperCamelCase = config.num_channels
def _SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase_ : List[Any] )-> str:
"""simple docstring"""
UpperCamelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
UpperCamelCase = self.embedder(UpperCAmelCase_ )
return hidden_state
class __a ( nn.Module ):
def __init__( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 2 )-> Union[str, Any]:
"""simple docstring"""
super().__init__()
UpperCamelCase = nn.Convad(UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=1 , stride=UpperCAmelCase_ , bias=UpperCAmelCase_ )
UpperCamelCase = nn.BatchNormad(UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase_ : Tensor )-> Tensor:
"""simple docstring"""
UpperCamelCase = self.convolution(UpperCAmelCase_ )
UpperCamelCase = self.normalization(UpperCAmelCase_ )
return hidden_state
class __a ( nn.Module ):
def __init__( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : int )-> Union[str, Any]:
"""simple docstring"""
super().__init__()
UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) )
UpperCamelCase = nn.Sequential(
nn.Convad(UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=1 ) , nn.Sigmoid() , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase_ : Dict )-> int:
"""simple docstring"""
# b c h w -> b c 1 1
UpperCamelCase = self.pooler(UpperCAmelCase_ )
UpperCamelCase = self.attention(UpperCAmelCase_ )
UpperCamelCase = hidden_state * attention
return hidden_state
class __a ( nn.Module ):
def __init__( self : Optional[Any] , UpperCAmelCase_ : RegNetConfig , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 1 )-> int:
"""simple docstring"""
super().__init__()
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
RegNetShortCut(UpperCAmelCase_ , UpperCAmelCase_ , stride=UpperCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
UpperCamelCase = nn.Sequential(
RegNetConvLayer(UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(UpperCAmelCase_ , UpperCAmelCase_ , stride=UpperCAmelCase_ , groups=UpperCAmelCase_ , activation=config.hidden_act ) , RegNetConvLayer(UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=1 , activation=UpperCAmelCase_ ) , )
UpperCamelCase = ACTaFN[config.hidden_act]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase_ : int )-> Dict:
"""simple docstring"""
UpperCamelCase = hidden_state
UpperCamelCase = self.layer(UpperCAmelCase_ )
UpperCamelCase = self.shortcut(UpperCAmelCase_ )
hidden_state += residual
UpperCamelCase = self.activation(UpperCAmelCase_ )
return hidden_state
class __a ( nn.Module ):
def __init__( self : int , UpperCAmelCase_ : RegNetConfig , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 1 )-> int:
"""simple docstring"""
super().__init__()
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
RegNetShortCut(UpperCAmelCase_ , UpperCAmelCase_ , stride=UpperCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
UpperCamelCase = nn.Sequential(
RegNetConvLayer(UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(UpperCAmelCase_ , UpperCAmelCase_ , stride=UpperCAmelCase_ , groups=UpperCAmelCase_ , activation=config.hidden_act ) , RegNetSELayer(UpperCAmelCase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=1 , activation=UpperCAmelCase_ ) , )
UpperCamelCase = ACTaFN[config.hidden_act]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCamelCase = hidden_state
UpperCamelCase = self.layer(UpperCAmelCase_ )
UpperCamelCase = self.shortcut(UpperCAmelCase_ )
hidden_state += residual
UpperCamelCase = self.activation(UpperCAmelCase_ )
return hidden_state
class __a ( nn.Module ):
def __init__( self : Dict , UpperCAmelCase_ : RegNetConfig , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 2 , )-> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
UpperCamelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , stride=UpperCAmelCase_ , ) , *[layer(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) for _ in range(depth - 1 )] , )
def _SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase_ : List[str] )-> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.layers(UpperCAmelCase_ )
return hidden_state
class __a ( nn.Module ):
def __init__( self : Dict , UpperCAmelCase_ : RegNetConfig )-> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
UpperCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(UpperCAmelCase_ , config.depths[1:] ):
self.stages.append(RegNetStage(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , depth=UpperCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase_ : Tensor , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True )-> BaseModelOutputWithNoAttention:
"""simple docstring"""
UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
UpperCamelCase = stage_module(UpperCAmelCase_ )
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase_ , hidden_states=UpperCAmelCase_ )
class __a ( _lowerCAmelCase ):
UpperCamelCase_ : int = RegNetConfig
UpperCamelCase_ : List[str] = '''regnet'''
UpperCamelCase_ : Tuple = '''pixel_values'''
UpperCamelCase_ : Optional[int] = True
def _SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase_ : int )-> Optional[Any]:
"""simple docstring"""
if isinstance(UpperCAmelCase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
elif isinstance(UpperCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def _SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False )-> Any:
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCamelCase = value
SCREAMING_SNAKE_CASE = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , _lowerCAmelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __a ( _lowerCAmelCase ):
def __init__( self : str , UpperCAmelCase_ : Any )-> Dict:
"""simple docstring"""
super().__init__(UpperCAmelCase_ )
UpperCamelCase = config
UpperCamelCase = RegNetEmbeddings(UpperCAmelCase_ )
UpperCamelCase = RegNetEncoder(UpperCAmelCase_ )
UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase_ : Tensor , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None )-> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.embedder(UpperCAmelCase_ )
UpperCamelCase = self.encoder(
UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(UpperCAmelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase_ , pooler_output=UpperCAmelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , _lowerCAmelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __a ( _lowerCAmelCase ):
def __init__( self : Tuple , UpperCAmelCase_ : Tuple )-> Optional[Any]:
"""simple docstring"""
super().__init__(UpperCAmelCase_ )
UpperCamelCase = config.num_labels
UpperCamelCase = RegNetModel(UpperCAmelCase_ )
# classification head
UpperCamelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[torch.LongTensor] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , )-> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase = self.classifier(UpperCAmelCase_ )
UpperCamelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCamelCase = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCamelCase = "single_label_classification"
else:
UpperCamelCase = "multi_label_classification"
if self.config.problem_type == "regression":
UpperCamelCase = MSELoss()
if self.num_labels == 1:
UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCamelCase = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCamelCase = BCEWithLogitsLoss()
UpperCamelCase = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
if not return_dict:
UpperCamelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=UpperCAmelCase_ , logits=UpperCAmelCase_ , hidden_states=outputs.hidden_states )
| 556 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __a ( _lowerCAmelCase , unittest.TestCase ):
UpperCamelCase_ : str = FunnelTokenizer
UpperCamelCase_ : Any = FunnelTokenizerFast
UpperCamelCase_ : Union[str, Any] = True
UpperCamelCase_ : Tuple = True
def _SCREAMING_SNAKE_CASE ( self : Tuple )-> Optional[int]:
"""simple docstring"""
super().setUp()
UpperCamelCase = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **UpperCAmelCase_ : Optional[Any] )-> str:
"""simple docstring"""
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict , **UpperCAmelCase_ : Any )-> Union[str, Any]:
"""simple docstring"""
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase_ : Tuple )-> str:
"""simple docstring"""
UpperCamelCase = "UNwant\u00E9d,running"
UpperCamelCase = "unwanted, running"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : str )-> List[str]:
"""simple docstring"""
UpperCamelCase = self.tokenizer_class(self.vocab_file )
UpperCamelCase = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(UpperCAmelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [7, 4, 5, 10, 8, 9] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.get_tokenizers(do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
UpperCamelCase = tokenizer("UNwant\u00E9d,running" )
UpperCamelCase = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )
UpperCamelCase = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
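# Note on the token_type_ids assertions above: Funnel marks the leading <cls>
# token with type id 2 (unlike BERT, which uses 0 throughout the first
# sequence), then uses 0 for the first sequence and 1 for the second, which is
# what the [2] + [0] * n + [1] * n patterns check.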
| 556 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
A__ : List[str] = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
A__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
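# Sketch of the intended lazy-import behavior (assuming the standard
# transformers _LazyModule machinery): importing this package is cheap, and
# the torch-dependent classes listed above are only resolved on first
# attribute access, e.g.
#
#   from transformers.models.gpt_bigcode import GPTBigCodeConfig  # no torch required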
| 13 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def a__ ( ) -> Tuple:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
_UpperCamelCase = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching, '''os.path.join''', lowercase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def a__ ( ) -> Tuple:
"""simple docstring"""
assert _test_patching.open is open
_UpperCamelCase = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching, '''open''', lowercase ):
assert _test_patching.open is mock
# check that everything is back to normal when the patch is over
assert _test_patching.open is open
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching, '''pandas.read_csv''', lowercase ):
pass
def a__ ( ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching, '''len''', lowercase ) is None
with patch_submodule(_test_patching, '''len''', lowercase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_start_and_stop_mock__'''
_UpperCamelCase = patch_submodule(_test_patching, '''open''', lowercase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def a__ ( ) -> List[str]:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
_UpperCamelCase = '''__test_patch_submodule_successive_join__'''
_UpperCamelCase = '''__test_patch_submodule_successive_dirname__'''
_UpperCamelCase = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching, '''os.path.join''', lowercase ):
with patch_submodule(_test_patching, '''os.rename''', lowercase ):
with patch_submodule(_test_patching, '''os.path.dirname''', lowercase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching, '''os.rename''', lowercase ):
with patch_submodule(_test_patching, '''os.path.join''', lowercase ):
with patch_submodule(_test_patching, '''os.path.dirname''', lowercase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def a__ ( ) -> str:
"""simple docstring"""
_UpperCamelCase = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching, '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''', lowercase ):
pass
with patch_submodule(_test_patching, '''os.__attribute_that_doesn_exist__''', lowercase ):
pass
| 98 | 0 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_snake_case = """src/transformers"""
_snake_case = """docs/source/en"""
_snake_case = """."""
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
with open(__magic_name__ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowercase__ = f.readlines()
# Find the start prompt.
lowercase__ = 0
while not lines[start_index].startswith(__magic_name__ ):
start_index += 1
start_index += 1
lowercase__ = start_index
while not lines[end_index].startswith(__magic_name__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_snake_case = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
_snake_case = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
_snake_case = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_snake_case = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
_snake_case = direct_transformers_import(TRANSFORMERS_PATH)
def _A ( __magic_name__ ):
lowercase__ = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , __magic_name__ )
return [m.group(0 ) for m in matches]
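# e.g. camel_case_split("GPTBigCodeModel") returns ["GPT", "Big", "Code", "Model"]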
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ = 2 if text == "✅" or text == "❌" else len(__magic_name__ )
lowercase__ = (width - text_length) // 2
lowercase__ = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def _A ( ):
lowercase__ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowercase__ = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
lowercase__ = {name: config.replace("Config" , "" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
lowercase__ = collections.defaultdict(__magic_name__ )
lowercase__ = collections.defaultdict(__magic_name__ )
lowercase__ = collections.defaultdict(__magic_name__ )
lowercase__ = collections.defaultdict(__magic_name__ )
lowercase__ = collections.defaultdict(__magic_name__ )
# Let's look through all transformers objects (once).
for attr_name in dir(__magic_name__ ):
lowercase__ = None
if attr_name.endswith("Tokenizer" ):
lowercase__ = slow_tokenizers
lowercase__ = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
lowercase__ = fast_tokenizers
lowercase__ = attr_name[:-13]
elif _re_tf_models.match(__magic_name__ ) is not None:
lowercase__ = tf_models
lowercase__ = _re_tf_models.match(__magic_name__ ).groups()[0]
elif _re_flax_models.match(__magic_name__ ) is not None:
lowercase__ = flax_models
lowercase__ = _re_flax_models.match(__magic_name__ ).groups()[0]
elif _re_pt_models.match(__magic_name__ ) is not None:
lowercase__ = pt_models
lowercase__ = _re_pt_models.match(__magic_name__ ).groups()[0]
if lookup_dict is not None:
while len(__magic_name__ ) > 0:
if attr_name in model_name_to_prefix.values():
lowercase__ = True
break
# Try again after removing the last word in the name
lowercase__ = "".join(camel_case_split(__magic_name__ )[:-1] )
# Let's build that table!
lowercase__ = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
lowercase__ = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
lowercase__ = [len(__magic_name__ ) + 2 for c in columns]
lowercase__ = max([len(__magic_name__ ) for name in model_names] ) + 2
# Build the table per se
lowercase__ = "|" + "|".join([_center_text(__magic_name__ , __magic_name__ ) for c, w in zip(__magic_name__ , __magic_name__ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
lowercase__ = {True: "✅", False: "❌"}
for name in model_names:
lowercase__ = model_name_to_prefix[name]
lowercase__ = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(__magic_name__ , __magic_name__ ) for l, w in zip(__magic_name__ , __magic_name__ )] ) + "|\n"
return table
def _A ( __magic_name__=False ):
lowercase__ , lowercase__ , lowercase__ , lowercase__ = _find_text_in_file(
filename=os.path.join(__magic_name__ , "index.md" ) , start_prompt="<!--This table is updated automatically from the auto modules" , end_prompt="<!-- End table-->" , )
lowercase__ = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(__magic_name__ , "index.md" ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_snake_case = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 611 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_snake_case = logging.get_logger(__name__)
_snake_case = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _A ( __magic_name__ ):
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
lowercase__ = model_type_to_module_name(__magic_name__ )
lowercase__ = importlib.import_module(f'''.{module_name}''' , "transformers.models" )
try:
return getattr(__magic_name__ , __magic_name__ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(__magic_name__ , "__name__" , __magic_name__ ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowercase__ = importlib.import_module("transformers" )
if hasattr(__magic_name__ , __magic_name__ ):
return getattr(__magic_name__ , __magic_name__ )
return None
def _A ( __magic_name__ , __magic_name__ = None , __magic_name__ = False , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = False , **__magic_name__ , ):
lowercase__ = get_file_from_repo(
__magic_name__ , __magic_name__ , cache_dir=__magic_name__ , force_download=__magic_name__ , resume_download=__magic_name__ , proxies=__magic_name__ , use_auth_token=__magic_name__ , revision=__magic_name__ , local_files_only=__magic_name__ , )
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(__magic_name__ , encoding="utf-8" ) as reader:
return json.load(__magic_name__ )
class lowerCAmelCase :
def __init__( self :List[Any] ):
'''simple docstring'''
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(_lowercase )
def UpperCAmelCase ( cls :Tuple , _lowercase :Any , **_lowercase :Union[str, Any] ):
'''simple docstring'''
lowercase__ = kwargs.pop("config" , _lowercase )
lowercase__ = kwargs.pop("trust_remote_code" , _lowercase )
lowercase__ = True
lowercase__ , lowercase__ = ImageProcessingMixin.get_image_processor_dict(_lowercase , **_lowercase )
lowercase__ = config_dict.get("image_processor_type" , _lowercase )
lowercase__ = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
lowercase__ = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
lowercase__ = config_dict.pop("feature_extractor_type" , _lowercase )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
lowercase__ = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
lowercase__ = config_dict["auto_map"]["AutoFeatureExtractor"]
lowercase__ = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_lowercase , _lowercase ):
lowercase__ = AutoConfig.from_pretrained(_lowercase , **_lowercase )
# It could be in `config.image_processor_type``
lowercase__ = getattr(_lowercase , "image_processor_type" , _lowercase )
if hasattr(_lowercase , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
lowercase__ = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
lowercase__ = image_processor_class_from_name(_lowercase )
lowercase__ = image_processor_auto_map is not None
lowercase__ = image_processor_class is not None or type(_lowercase ) in IMAGE_PROCESSOR_MAPPING
lowercase__ = resolve_trust_remote_code(
_lowercase , _lowercase , _lowercase , _lowercase )
if has_remote_code and trust_remote_code:
lowercase__ = get_class_from_dynamic_module(
_lowercase , _lowercase , **_lowercase )
lowercase__ = kwargs.pop("code_revision" , _lowercase )
if os.path.isdir(_lowercase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_lowercase , **_lowercase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_lowercase , **_lowercase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_lowercase ) in IMAGE_PROCESSOR_MAPPING:
lowercase__ = IMAGE_PROCESSOR_MAPPING[type(_lowercase )]
return image_processor_class.from_dict(_lowercase , **_lowercase )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def UpperCAmelCase ( _lowercase :Optional[int] , _lowercase :Dict ):
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(_lowercase , _lowercase )
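# Minimal usage sketch for the class above (the checkpoint name is
# illustrative, not taken from this file):
#
#   image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
#   inputs = image_processor(images=image, return_tensors="pt")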
| 611 | 1 |
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = 0
# if input_string is "aba" than new_input_string become "a|b|a"
lowerCamelCase_ = ''''''
lowerCamelCase_ = ''''''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__snake_case ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the start and end of the previous furthest-ending palindromic
# substring
lowerCamelCase_ = 0, 0
# length[i] shows the length of palindromic substring with center i
lowerCamelCase_ = [1 for i in range(len(__snake_case ) )]
# for each character in new_string find corresponding palindromic string
lowerCamelCase_ = 0
for j in range(len(__snake_case ) ):
lowerCamelCase_ = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__snake_case )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
lowerCamelCase_ = 2 * k - 1
# does this palindrome end after the previously explored end (that is, r)?
# if yes, update r to the last index of this palindrome
if j + k - 1 > r:
lowerCamelCase_ = j - k + 1 # noqa: E741
lowerCamelCase_ = j + k - 1
# update max_length and start position
if max_length < length[j]:
lowerCamelCase_ = length[j]
lowerCamelCase_ = j
# create that string
lowerCamelCase_ = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
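# Worked example of the intended behavior: for input "abacab" the augmented
# string becomes "a|b|a|c|a|b" and the longest palindromic substring found is
# "bacab".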
| 70 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : str = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : Tuple = "timesformer"
def __init__( self : Optional[Any] , A : Tuple=2_24 , A : Optional[int]=16 , A : Any=3 , A : str=8 , A : Optional[Any]=7_68 , A : Dict=12 , A : Optional[int]=12 , A : Optional[Any]=30_72 , A : Optional[Any]="gelu" , A : Union[str, Any]=0.0 , A : Dict=0.0 , A : str=0.02 , A : Union[str, Any]=1e-6 , A : Union[str, Any]=True , A : Dict="divided_space_time" , A : Optional[Any]=0 , **A : List[str] , ) -> Tuple:
super().__init__(**A )
lowercase_ : Tuple = image_size
lowercase_ : str = patch_size
lowercase_ : Tuple = num_channels
lowercase_ : Optional[Any] = num_frames
lowercase_ : List[str] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Union[str, Any] = num_attention_heads
lowercase_ : str = intermediate_size
lowercase_ : Any = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : List[Any] = initializer_range
lowercase_ : List[Any] = layer_norm_eps
lowercase_ : List[str] = qkv_bias
lowercase_ : Any = attention_type
lowercase_ : Dict = drop_path_rate
| 231 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowercase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True , _UpperCamelCase="pt" ):
'''simple docstring'''
__lowercase = {'''add_prefix_space''': True} if isinstance(_UpperCamelCase , _UpperCamelCase ) and not line.startswith(''' ''' ) else {}
__lowercase = padding_side
return tokenizer(
[line] , max_length=_UpperCamelCase , padding='''max_length''' if pad_to_max_length else None , truncation=_UpperCamelCase , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase , **_UpperCamelCase , )
def lowercase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , ):
'''simple docstring'''
__lowercase = input_ids.ne(_UpperCamelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
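# Worked example (assuming pad_token_id=0): input_ids [[5, 6, 0], [7, 0, 0]]
# keeps only the columns where some row is non-pad, yielding [[5, 6], [7, 0]].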
class lowerCamelCase_ ( lowercase_ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_="train" , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="" , ) -> List[str]:
'''simple docstring'''
super().__init__()
__lowercase = Path(UpperCamelCase__ ).joinpath(type_path + '''.source''' )
__lowercase = Path(UpperCamelCase__ ).joinpath(type_path + '''.target''' )
__lowercase = self.get_char_lens(self.src_file )
__lowercase = max_source_length
__lowercase = max_target_length
assert min(self.src_lens ) > 0, F'found empty line in {self.src_file}'
__lowercase = tokenizer
__lowercase = prefix
if n_obs is not None:
__lowercase = self.src_lens[:n_obs]
__lowercase = src_lang
__lowercase = tgt_lang
def __len__( self ) -> Tuple:
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self , snake_case_ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = index + 1 # linecache starts at 1
__lowercase = self.prefix + linecache.getline(str(self.src_file ) , UpperCamelCase__ ).rstrip('''\n''' )
__lowercase = linecache.getline(str(self.tgt_file ) , UpperCamelCase__ ).rstrip('''\n''' )
assert source_line, F'empty source line for index {index}'
assert tgt_line, F'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer , UpperCamelCase__ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__lowercase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , UpperCamelCase__ ) else self.tokenizer
)
__lowercase = self.tokenizer.generator if isinstance(self.tokenizer , UpperCamelCase__ ) else self.tokenizer
__lowercase = encode_line(UpperCamelCase__ , UpperCamelCase__ , self.max_source_length , '''right''' )
__lowercase = encode_line(UpperCamelCase__ , UpperCamelCase__ , self.max_target_length , '''right''' )
__lowercase = source_inputs['''input_ids'''].squeeze()
__lowercase = target_inputs['''input_ids'''].squeeze()
__lowercase = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def A ( snake_case_ ) -> List[str]:
'''simple docstring'''
return [len(UpperCamelCase__ ) for x in Path(UpperCamelCase__ ).open().readlines()]
def A ( self , snake_case_ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = torch.stack([x['''input_ids'''] for x in batch] )
__lowercase = torch.stack([x['''attention_mask'''] for x in batch] )
__lowercase = torch.stack([x['''decoder_input_ids'''] for x in batch] )
__lowercase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , UpperCamelCase__ )
else self.tokenizer.pad_token_id
)
__lowercase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , UpperCamelCase__ )
else self.tokenizer.pad_token_id
)
__lowercase = trim_batch(UpperCamelCase__ , UpperCamelCase__ )
__lowercase , __lowercase = trim_batch(UpperCamelCase__ , UpperCamelCase__ , attention_mask=UpperCamelCase__ )
__lowercase = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
a : List[Any] = getLogger(__name__)
def lowercase_ ( _UpperCamelCase ):
'''simple docstring'''
return list(itertools.chain.from_iterable(_UpperCamelCase ) )
def lowercase_ ( _UpperCamelCase ):
'''simple docstring'''
__lowercase = get_git_info()
save_json(_UpperCamelCase , os.path.join(_UpperCamelCase , '''git_log.json''' ) )
def lowercase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=4 , **_UpperCamelCase ):
'''simple docstring'''
with open(_UpperCamelCase , '''w''' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase , indent=_UpperCamelCase , **_UpperCamelCase )
def lowercase_ ( _UpperCamelCase ):
'''simple docstring'''
with open(_UpperCamelCase ) as f:
return json.load(_UpperCamelCase )
def lowercase_ ( ):
'''simple docstring'''
__lowercase = git.Repo(search_parent_directories=_UpperCamelCase )
__lowercase = {
'''repo_id''': str(_UpperCamelCase ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def lowercase_ ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
return list(map(_UpperCamelCase , _UpperCamelCase ) )
def lowercase_ ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
with open(_UpperCamelCase , '''wb''' ) as f:
return pickle.dump(_UpperCamelCase , _UpperCamelCase )
def lowercase_ ( _UpperCamelCase ):
'''simple docstring'''
def remove_articles(_UpperCamelCase ):
return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , _UpperCamelCase )
def white_space_fix(_UpperCamelCase ):
return " ".join(text.split() )
def remove_punc(_UpperCamelCase ):
__lowercase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_UpperCamelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCamelCase ) ) ) )
def lowercase_ ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowercase = normalize_answer(_UpperCamelCase ).split()
__lowercase = normalize_answer(_UpperCamelCase ).split()
__lowercase = Counter(_UpperCamelCase ) & Counter(_UpperCamelCase )
__lowercase = sum(common.values() )
if num_same == 0:
return 0
__lowercase = 1.0 * num_same / len(_UpperCamelCase )
__lowercase = 1.0 * num_same / len(_UpperCamelCase )
__lowercase = (2 * precision * recall) / (precision + recall)
return fa
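# Worked example: prediction "The cat sat" vs ground truth "cat sat down"
# normalize to ["cat", "sat"] and ["cat", "sat", "down"]; precision = 2/2,
# recall = 2/3, so f1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.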
def lowercase_ ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
return normalize_answer(_UpperCamelCase ) == normalize_answer(_UpperCamelCase )
def lowercase_ ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
assert len(_UpperCamelCase ) == len(_UpperCamelCase )
__lowercase = 0
for hypo, pred in zip(_UpperCamelCase , _UpperCamelCase ):
em += exact_match_score(_UpperCamelCase , _UpperCamelCase )
if len(_UpperCamelCase ) > 0:
em /= len(_UpperCamelCase )
return {"em": em}
def lowercase_ ( _UpperCamelCase ):
'''simple docstring'''
return model_prefix.startswith('''rag''' )
def lowercase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowercase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__lowercase = '''dropout_rate'''
for p in extra_params:
if getattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if not hasattr(_UpperCamelCase , _UpperCamelCase ) and not hasattr(_UpperCamelCase , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(_UpperCamelCase ) )
delattr(_UpperCamelCase , _UpperCamelCase )
continue
__lowercase = p if hasattr(_UpperCamelCase , _UpperCamelCase ) else equivalent_param[p]
setattr(_UpperCamelCase , _UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
delattr(_UpperCamelCase , _UpperCamelCase )
return hparams, config
| 711 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size=16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length")
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"])

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
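# Hedged sketch (not part of the original file): fitting RegressionModel to
# RegressionDataset with plain SGD, to show how these test fixtures combine.
if __name__ == "__main__":
    dataset = RegressionDataset(length=64, seed=0)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    model = RegressionModel(a=0.0, b=0.0)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(20):
        for batch in loader:
            optimizer.zero_grad()
            loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
            loss.backward()
            optimizer.step()
    print(model.a.item(), model.b.item())  # should approach 2 and 3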
| 527 | 0 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc", "https://huggingface.co/docs/diffusers/model_doc")
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
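# Hedged illustration (not part of the original script): how the "init"
# pattern rewrites a version string in isolation.
def _demo_replace_init_version():
    re_pattern, replace = REPLACE_PATTERNS["init"]
    new = replace.replace("VERSION", "0.20.0")
    print(re_pattern.sub(new, '__version__ = "0.19.0.dev0"\n'))  # __version__ = "0.20.0"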
| 474 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images
        assert image.shape == (1, 512, 768, 3)
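# Hedged usage sketch (not part of the original tests): the pipeline is
# usually driven directly; sag_scale=0.75 is the value used in the diffusers
# examples, sag_scale=0.0 disables self-attention guidance.
# pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
# image = pipe("a photo of an astronaut", sag_scale=0.75).images[0]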
| 474 | 1 |
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Given a sorted list of ints, return indices i < j with nums[i] + nums[j] == target."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
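# Hedged companion sketch (not in the original file): the two-pointer scan
# above assumes `nums` is sorted; for unsorted input a one-pass hash map
# gives the same answer in O(n).
def two_sum_hash(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}  # value -> index
    for j, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], j]
        seen[value] = j
    return []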
| 710 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Heuristic for the A*: Manhattan or Euclidean distance to the goal."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid successor nodes reachable from `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent))
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Retrace the path from parent to parent until the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
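# Hedged sketch (not part of the original file): a quick check that forward
# and bidirectional searches reach the same goal cell on the module grid.
def _demo_same_goal() -> None:
    start, end = (0, 0), (len(grid) - 1, len(grid[0]) - 1)
    p1 = AStar(start, end).search()
    p2 = BidirectionalAStar(start, end).search()
    print(p1[-1], p2[-1])  # both should equal `end`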
| 56 | 0 |
"""simple docstring"""
import math
def prime_sieve(n: int) -> list[int]:
    """Sieve of Eratosthenes over the numbers below n, returning the primes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit: int = 999_966_663_333) -> int:
    """
    Sum the semidivisible numbers below `limit`: numbers divisible by exactly
    one of lps(n) (largest prime <= sqrt(n)) and ups(n) (smallest prime >= sqrt(n)).
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
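# Hedged usage sketch (not part of the original file):
# >>> prime_sieve(30)
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
# solution() with the default limit takes a while; try solution(limit=1000) first.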
| 698 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    """Constructs an image processor that rescales pixel values and pads images to a multiple of `pad_size`."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
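# Hedged illustration (not part of the original class): pad() rounds each
# spatial dim up to the next multiple of `size`, always adding at least one
# pixel (an exact multiple still gains a full stride of padding).
def _pad_amount(old: int, size: int = 8) -> int:
    return (old // size + 1) * size - old

# _pad_amount(510) == 2, _pad_amount(517) == 3, _pad_amount(512) == 8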
| 474 | 0 |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's Law: pass the unknown quantity as 0 and the other two as values."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
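# Hedged usage sketch (not part of the original file):
# >>> ohms_law(voltage=10, current=5, resistance=0)
# {'resistance': 2.0}
# >>> ohms_law(voltage=0, current=1.5, resistance=2)
# {'voltage': 3.0}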
| 715 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Return a match for any non-binary `open(...)` call missing an encoding argument."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Return a match for any `print(...)` call, ignoring comments and docstrings."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
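# Hedged illustration (not part of the original tests): what the encoding
# regex flags and what it lets through.
def _demo_encoding_regex() -> None:
    regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
    assert regexp.search(" open('data.txt')") is not None  # flagged: no encoding
    assert regexp.search(" open('data.txt', encoding='utf-8')") is None  # allowed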
| 140 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        # If instances were shared, setting an attribute on one would leak to the other.
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
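# Hedged illustration (not part of the original tests): gelu_10 matches GELU
# below the clip value but saturates at 10.
# >>> get_activation("gelu_10")(torch.tensor([0.5, 100.0]))
# approximately tensor([0.3457, 10.0000])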
| 63 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """This __init__ is there for legacy code. When removing deprecated args completely,
        the class can simply be deleted."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
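# Hedged usage sketch (not part of the original module); the model name is
# just an example:
# args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32])
# print(args.device, args.n_gpu)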
| 63 | 1 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())
        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )
                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )
                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")
            title_str += f" {label_model_name} vs."
        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"
        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
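# Hedged usage sketch (not part of the original script); the csv file names
# are made up:
#   python plot_csv_file.py --csv_file inference_memory.csv --figure_png_file memory_plot.png
#   python plot_csv_file.py --csv_file inference_time.csv --is_time --no_log_scale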
| 451 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
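        # Hedged note (not in the original tests): with the defaults above,
        # frequency_out_dimension = (16 - 2) // 2 + 1 = 8 and
        # time_out_dimension = (24 - 2) // 2 + 1 = 12, so seq_length = 8 * 12 + 2 = 98.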
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_values, labels) = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as AST does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    audio_file = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset")
    audio, sampling_rate = torchaudio.load(audio_file)
    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 451 | 1 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
| 31 |
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a DFS-based topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
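# Hedged note (not part of the original file): each vertex is appended only
# after all of its neighbors, so for the graph above the result is
# ['c', 'd', 'e', 'b', 'a']; use list(reversed(sort)) if you want every
# vertex to appear before the vertices reachable from it.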
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
| 458 | 0 |
'''simple docstring'''
def is_even(number: int) -> bool:
    """Return True if the given integer is even."""
    return number & 1 == 0
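# Hedged doctest-style examples (not part of the original file):
# >>> is_even(4)
# True
# >>> is_even(7)
# False
# The bitwise check works because the least-significant bit of an even
# integer is always 0.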
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 717 |
'''simple docstring'''
from __future__ import annotations
SCREAMING_SNAKE_CASE_ = 10
def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort a list of non-negative integers in place with LSD radix sort."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
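# Hedged usage sketch (not part of the original file); non-negative
# integers only:
# >>> radix_sort([170, 45, 75, 90, 2, 802, 24, 66])
# [2, 24, 45, 66, 75, 90, 170, 802]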
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 466 | 0 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 128,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("test-config", use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})
        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Dict ):
snake_case__ : int = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
snake_case__ : Union[str, Any] = c.n_embd + 1 # int
snake_case__ : Tuple = c.resid_pdrop + 1.0 # float
snake_case__ : Union[str, Any] = not c.scale_attn_weights # bool
snake_case__ : Optional[int] = c.summary_type + "foo" # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__A , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(__A , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(__A , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(__A , c.summary_type , "mismatch for key: summary_type" )
def _lowercase ( self : List[Any] ):
snake_case__ : Dict = PretrainedConfig()
snake_case__ : Optional[int] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
__A , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
snake_case__ : Optional[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(__A , __A )]
if len(__A ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f''' {', '.join(__A )}.''' )
def _lowercase ( self : Optional[int] ):
with self.assertRaises(__A ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case__ : Optional[int] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
snake_case__ : Union[str, Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(__A )
def _lowercase ( self : List[str] ):
# A mock response for an HTTP head request to emulate server down
snake_case__ : Union[str, Any] = mock.Mock()
snake_case__ : Union[str, Any] = 5_0_0
snake_case__ : int = {}
snake_case__ : Any = HTTPError
snake_case__ : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
snake_case__ : int = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__A ) as mock_head:
snake_case__ : Dict = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
def _lowercase ( self : Dict ):
# This test is for deprecated behavior and can be removed in v5
snake_case__ : str = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def _lowercase ( self : str ):
snake_case__ : List[str] = AutoConfig.from_pretrained("bert-base-cased" )
snake_case__ : int = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__A )
snake_case__ : int = 2
json.dump(configuration.to_dict() , open(os.path.join(__A , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
snake_case__ : Tuple = AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
snake_case__ : Dict = ["config.42.0.0.json"]
snake_case__ : Optional[int] = 7_6_8
configuration.save_pretrained(__A )
shutil.move(os.path.join(__A , "config.4.0.0.json" ) , os.path.join(__A , "config.42.0.0.json" ) )
snake_case__ : Optional[Any] = AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def _lowercase ( self : Any ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
snake_case__ : Any = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
snake_case__ : Optional[Any] = "v4.0.0"
snake_case__, snake_case__ : Optional[int] = new_transformers.models.auto.AutoConfig.from_pretrained(
__A , return_unused_kwargs=__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(__A , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
snake_case__ : Dict = "v3.0.0"
snake_case__ : Dict = old_transformers.models.auto.AutoConfig.from_pretrained(__A )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
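# A minimal sketch of the behavior `test_config_from_string` relies on (assumes a
# working `transformers` install): `update_from_string` parses a comma-separated
# "key=value" string and casts each value to the type of the existing attribute.
if __name__ == "__main__":
    from transformers import GPT2Config

    demo_config = GPT2Config()
    demo_config.update_from_string("n_embd=1024,resid_pdrop=0.2,scale_attn_weights=False")
    assert demo_config.n_embd == 1024 and demo_config.resid_pdrop == 0.2
    assert demo_config.scale_attn_weights is False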
| 297 |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's law (V = I * R): exactly one of the three arguments must be 0,
    and the missing quantity is solved for and returned in a one-entry dict.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
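
# Illustrative usage sketch (assumed values): pass the unknown quantity as 0 and
# the function solves for it.
if __name__ == "__main__":
    print(ohms_law(voltage=0, current=2.0, resistance=4.0))   # {'voltage': 8.0}
    print(ohms_law(voltage=12.0, current=0, resistance=4.0))  # {'current': 3.0}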
| 297 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
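
# Behavior sketch (assumes an installed `transformers`): with this pattern,
# `import transformers.models.biogpt` stays cheap, and the heavy modeling module
# is only imported on first attribute access, e.g.:
#
#     from transformers.models.biogpt import BioGptForCausalLM  # triggers the lazy import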
| 283 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class _lowerCamelCase(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Resize so that the shortest edge matches size["shortest_edge"], keeping the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
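
# A minimal usage sketch (illustrative; the class follows the standard
# `BaseImageProcessor` pattern, so calling the instance dispatches to `preprocess`):
#
#     import numpy as np
#     processor = _lowerCamelCase()
#     image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#     batch = processor(images=image, return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224): resized to 256, cropped to 224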
| 283 | 1 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the unique paths from the top-left to the bottom-right corner.

    Cells containing 1 are blocked; `visit` tracks cells on the current path.
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
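
# Illustrative usage sketch: 1 marks a blocked cell, and the two open corridors
# around the centre give exactly two paths.
if __name__ == "__main__":
    maze = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(maze, 0, 0, set()))  # 2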
| 23 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish DNI: 8 digits followed by the matching control letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
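
# Illustrative usage sketch (made-up IDs): the control letter must equal
# LOOKUP_LETTERS[number % 23].
if __name__ == "__main__":
    print(is_spain_national_id("12345678Z"))  # True: 12345678 % 23 == 14 -> 'Z'
    print(is_spain_national_id("12345678A"))  # False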
| 392 | 0 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet's tokens."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the index, clustering it with any close duplicates already present."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset, in two steps:
    1. Compute a MinHash for each code snippet.
    2. Feed the MinHashes into a DuplicationIndex (a MinHashLSH under the hood).
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Jaccard similarity of two code snippets' token sets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-identical files in a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Remove near-duplicates from the dataset, keeping one extreme per cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
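
# A minimal end-to-end sketch (illustrative; assumes a `datasets.Dataset` with
# "content", "repo_name" and "path" columns, as in a code corpus):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"content": [...], "repo_name": [...], "path": [...]})
#     ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)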
| 506 |
'''simple docstring'''
def optimal_merge_pattern(files: list) -> float:
    """Merge all files into one, two at a time, always merging the two cheapest;
    return the minimum total merge cost."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider the two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
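
# Illustrative usage sketch: merging [2, 3, 4] costs 5 (2+3) and then 9 (5+4),
# i.e. 14 in total.
if __name__ == "__main__":
    print(optimal_merge_pattern([2, 3, 4]))  # 14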
| 506 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE__(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Resize so that the shortest edge matches size["shortest_edge"], keeping the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors) | 686 |
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    """Shortest path on a binary grid where cells equal to 1 are passable.

    Returns the distance to `destination` and the path taken, or (inf, [])
    when the destination is unreachable.
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
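
# Illustrative usage sketch: cells equal to 1 are passable; the only shortest
# route goes through the centre cell.
if __name__ == "__main__":
    demo_grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
    print(dijkstra(demo_grid, (0, 0), (2, 2), allow_diagonal=False))
    # (4.0, [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)])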
| 260 | 0 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: "Iterable[_T] | None" = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Move items from the input stack to the output stack only when the
        # latter is empty; this reverses order once, giving amortized O(1) FIFO.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")

        return self._stack2.pop()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
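
# Illustrative usage sketch: items come out in FIFO order despite the two
# LIFO stacks underneath.
if __name__ == "__main__":
    queue = QueueByTwoStacks([1, 2, 3])
    queue.put(4)
    print(queue.get(), queue.get())  # 1 2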
| 710 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class A__(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Resize so that the shortest edge matches size["shortest_edge"], keeping the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
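
# Note: `normalize` computes (image - mean) / std per channel, so with
# IMAGENET_STANDARD_MEAN/STD (all 0.5) the rescaled [0, 1] pixels land in [-1, 1].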
| 506 | 0 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys()) | 334 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Check primality using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that cannot be written as the sum of a
    prime and twice a square (Goldbach's other conjecture, Project Euler 46)."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest odd composite that violates Goldbach's other conjecture."""
    return compute_nums(1)[0]
if __name__ == "__main__":
    print(f"{solution() = }") | 334 | 1 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])

        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        # Accept either a single threshold for all layers or one per layer.
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message  # the early-exit outputs
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A highway (early-exit) head: pooler + dropout + classifier on an intermediate layer."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
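
# A minimal inference sketch (illustrative; assumes a fine-tuned checkpoint and a
# matching tokenizer). Early exit only triggers in eval mode, once a highway
# head's entropy drops below the per-layer threshold:
#
#     model = DeeBertForSequenceClassification.from_pretrained(checkpoint_path)
#     model.bert.encoder.set_early_exit_entropy(0.5)
#     model.eval()
#     outputs = model(**tokenizer("some text", return_tensors="pt"))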
| 707 |
def dodecahedron_surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10*sqrt(5)) * edge^2."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume of a regular dodecahedron: (15 + 7*sqrt(5)) / 4 * edge^3."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
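
# Illustrative check: for edge = 5, surface area ≈ 516.1432201766901 and
# volume ≈ 957.8898700780791.
if __name__ == "__main__":
    print(dodecahedron_surface_area(5))
    print(dodecahedron_volume(5))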
| 219 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ):
if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
class UpperCamelCase ( lowercase_ ):
lowercase = ['pixel_values']
def __init__( self ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = PILImageResampling.BILINEAR ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = True ,__UpperCamelCase = 1 / 255 ,__UpperCamelCase = True ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> None:
'''simple docstring'''
super().__init__(**_A )
lowercase_ : Dict = size if size is not None else {'''shortest_edge''': 256}
lowercase_ : Dict = get_size_dict(_A ,default_to_square=_A )
lowercase_ : List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase_ : Dict = get_size_dict(_A ,param_name='crop_size' )
lowercase_ : Union[str, Any] = do_resize
lowercase_ : Union[str, Any] = size
lowercase_ : Optional[Any] = do_center_crop
lowercase_ : Dict = crop_size
lowercase_ : Any = resample
lowercase_ : Optional[int] = do_rescale
lowercase_ : Dict = rescale_factor
lowercase_ : Tuple = offset
lowercase_ : Dict = do_normalize
lowercase_ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase_ : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = PILImageResampling.BILINEAR ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> np.ndarray:
'''simple docstring'''
lowercase_ : Dict = get_size_dict(_A ,default_to_square=_A )
if "shortest_edge" in size:
lowercase_ : List[Any] = get_resize_output_image_size(_A ,size['shortest_edge'] ,default_to_square=_A )
elif "height" in size and "width" in size:
lowercase_ : str = (size['''height'''], size['''width'''])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_A ,size=_A ,resample=_A ,data_format=_A ,**_A )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> np.ndarray:
'''simple docstring'''
lowercase_ : int = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_A ,size=(size['height'], size['width']) ,data_format=_A ,**_A )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = True ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Tuple:
'''simple docstring'''
lowercase_ : str = image.astype(np.floataa )
if offset:
lowercase_ : Optional[int] = image - (scale / 2)
return rescale(_A ,scale=_A ,data_format=_A ,**_A )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> np.ndarray:
'''simple docstring'''
return normalize(_A ,mean=_A ,std=_A ,data_format=_A ,**_A )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = ChannelDimension.FIRST ,) -> np.ndarray:
'''simple docstring'''
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
lowercase_ : List[Any] = to_numpy_array(_A )
if do_resize:
lowercase_ : Optional[Any] = self.resize(image=_A ,size=_A ,resample=_A )
if do_center_crop:
lowercase_ : int = self.center_crop(_A ,size=_A )
if do_rescale:
lowercase_ : Any = self.rescale(image=_A ,scale=_A ,offset=_A )
if do_normalize:
lowercase_ : Dict = self.normalize(image=_A ,mean=_A ,std=_A )
lowercase_ : Optional[Any] = to_channel_dimension_format(_A ,_A )
return image
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = ChannelDimension.FIRST ,**__UpperCamelCase ,) -> PIL.Image.Image:
'''simple docstring'''
lowercase_ : List[Any] = do_resize if do_resize is not None else self.do_resize
lowercase_ : List[Any] = resample if resample is not None else self.resample
lowercase_ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ : Union[str, Any] = offset if offset is not None else self.offset
lowercase_ : int = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
lowercase_ : Tuple = image_std if image_std is not None else self.image_std
lowercase_ : Dict = size if size is not None else self.size
lowercase_ : List[Any] = get_size_dict(_A ,default_to_square=_A )
lowercase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
lowercase_ : Optional[Any] = get_size_dict(_A ,param_name='crop_size' )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
lowercase_ : Dict = make_batched(_A )
lowercase_ : int = [
[
self._preprocess_image(
image=_A ,do_resize=_A ,size=_A ,resample=_A ,do_center_crop=_A ,crop_size=_A ,do_rescale=_A ,rescale_factor=_A ,offset=_A ,do_normalize=_A ,image_mean=_A ,image_std=_A ,data_format=_A ,)
for img in video
]
for video in videos
]
lowercase_ : Optional[int] = {'''pixel_values''': videos}
return BatchFeature(data=_A ,tensor_type=_A )
| 425 |
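The rescale method above supports an offset mode for video models whose inputs are centered around zero rather than scaled into [0, 1]. A small numpy illustration of the two conventions (the exact factor the class applies depends on the rescale_factor it receives; these numbers are illustrative):

import numpy as np

frame = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)  # synthetic video frame
plain = frame.astype(np.float32) * (1 / 255)       # rescale only: values in [0, 1]
centered = frame.astype(np.float32) / 127.5 - 1.0  # rescale with offset: values in [-1, 1]
print(plain.min() >= 0.0, centered.min() >= -1.0, centered.max() <= 1.0)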
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
_UpperCamelCase : Any = tuple[int, int]
class snake_case__ :
def __init__( self : List[str] , _A : set[int] , _A : Mapping[EdgeT, int] ) -> None:
UpperCAmelCase_ : set[int] = vertices
UpperCAmelCase_ : dict[EdgeT, int] = {
(min(_A ), max(_A )): weight for edge, weight in edges.items()
}
def A ( self : Union[str, Any] , _A : EdgeT , _A : int ) -> None:
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
UpperCAmelCase_ : List[str] = weight
def A ( self : str ) -> Graph:
UpperCAmelCase_ : Graph = Graph({min(self.vertices )} , {} )
UpperCAmelCase_ : EdgeT
UpperCAmelCase_ : int
UpperCAmelCase_ : EdgeT
UpperCAmelCase_ : int
while len(subgraph.vertices ) < len(self.vertices ):
UpperCAmelCase_ : Optional[Any] = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
UpperCAmelCase_ : Dict = edge
UpperCAmelCase_ : Tuple = weight
subgraph.add_edge(_A , _A )
return subgraph
def __UpperCAmelCase ( A : str = "p107_network.txt" ) -> int:
UpperCAmelCase_ : str = os.path.abspath(os.path.dirname(__file__ ) )
UpperCAmelCase_ : str = os.path.join(A , A )
UpperCAmelCase_ : dict[EdgeT, int] = {}
UpperCAmelCase_ : list[str]
UpperCAmelCase_ : int
UpperCAmelCase_ : int
with open(A ) as f:
UpperCAmelCase_ : int = f.read().strip().split('''\n''' )
UpperCAmelCase_ : Any = [line.split(''',''' ) for line in data]
for edgea in range(1 , len(A ) ):
for edgea in range(A ):
if adjacency_matrix[edgea][edgea] != "-":
UpperCAmelCase_ : Optional[int] = int(adjacency_matrix[edgea][edgea] )
UpperCAmelCase_ : Graph = Graph(set(range(len(A ) ) ) , A )
UpperCAmelCase_ : Graph = graph.prims_algorithm()
UpperCAmelCase_ : int = sum(graph.edges.values() )
UpperCAmelCase_ : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 541 | 0 |
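prims_algorithm above rescans every edge on each step, which is O(V*E); a heap-based Prim's is the usual faster alternative. A self-contained sketch on a made-up graph:

import heapq

def mst_weight(n, edges):
    # Heap-based Prim's with lazy deletion; edges maps (u, v) -> weight.
    adj = {v: [] for v in range(n)}
    for (a, b), w in edges.items():
        adj[a].append((w, b))
        adj[b].append((w, a))
    seen, heap, total = set(), [(0, 0)], 0
    while heap and len(seen) < n:
        w, v = heapq.heappop(heap)
        if v in seen:
            continue
        seen.add(v)
        total += w
        for nw, nv in adj[v]:
            if nv not in seen:
                heapq.heappush(heap, (nw, nv))
    return total

demo_edges = {(0, 1): 4, (0, 2): 1, (1, 2): 2, (1, 3): 5, (2, 3): 8}
print(mst_weight(4, demo_edges))  # 8: the MST uses edges (0, 2), (1, 2), (1, 3)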
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
__A : Optional[int] = list[list[float | int]]
def lowerCamelCase_ ( lowercase__ , lowercase__):
lowerCamelCase__ = len(__UpperCAmelCase)
lowerCamelCase__ = [[0 for _ in range(size + 1)] for _ in range(__UpperCAmelCase)]
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
for row in range(__UpperCAmelCase):
for col in range(__UpperCAmelCase):
lowerCamelCase__ = matrix[row][col]
lowerCamelCase__ = vector[row][0]
lowerCamelCase__ = 0
lowerCamelCase__ = 0
while row < size and col < size:
# pivoting
lowerCamelCase__ = max((abs(augmented[rowa][col]), rowa) for rowa in range(__UpperCAmelCase , __UpperCAmelCase))[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
lowerCamelCase__ , lowerCamelCase__ = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __UpperCAmelCase):
lowerCamelCase__ = augmented[rowa][col] / augmented[row][col]
lowerCamelCase__ = 0
for cola in range(col + 1 , size + 1):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __UpperCAmelCase):
for row in range(__UpperCAmelCase):
lowerCamelCase__ = augmented[row][col] / augmented[col][col]
for cola in range(__UpperCAmelCase , size + 1):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10)] for row in range(__UpperCAmelCase)
]
def lowerCamelCase_ ( lowercase__):
lowerCamelCase__ = len(__UpperCAmelCase)
lowerCamelCase__ = [[0 for _ in range(__UpperCAmelCase)] for _ in range(__UpperCAmelCase)]
lowerCamelCase__ = [[0] for _ in range(__UpperCAmelCase)]
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
for x_val, y_val in enumerate(__UpperCAmelCase):
for col in range(__UpperCAmelCase):
lowerCamelCase__ = (x_val + 1) ** (size - col - 1)
lowerCamelCase__ = y_val
lowerCamelCase__ = solve(__UpperCAmelCase , __UpperCAmelCase)
def interpolated_func(lowercase__) -> int:
return sum(
round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
for x_val in range(__UpperCAmelCase))
return interpolated_func
def lowerCamelCase_ ( lowercase__):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase_ ( lowercase__ = question_function , lowercase__ = 10):
lowerCamelCase__ = [func(__UpperCAmelCase) for x_val in range(1 , order + 1)]
lowerCamelCase__ = [
interpolate(data_points[:max_coeff]) for max_coeff in range(1 , order + 1)
]
lowerCamelCase__ = 0
lowerCamelCase__ = 42
lowerCamelCase__ = 42
for poly in polynomials:
lowerCamelCase__ = 1
while func(__UpperCAmelCase) == poly(__UpperCAmelCase):
x_val += 1
ret += poly(__UpperCAmelCase)
return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
| 713 |
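solve and interpolate above amount to Gaussian elimination over a Vandermonde system, used to find each "first incorrect term" (FIT). The same idea in a few lines with exact Lagrange interpolation over Fractions, applied to the cubic u(n) = n**3 that Project Euler 101 uses as its worked example:

from fractions import Fraction

def lagrange_eval(ys, x):
    # Interpolate exactly through (1, ys[0]) .. (k, ys[k-1]), then evaluate at x.
    k = len(ys)
    total = Fraction(0)
    for i in range(1, k + 1):
        term = Fraction(ys[i - 1])
        for j in range(1, k + 1):
            if j != i:
                term *= Fraction(x - j, i - j)
        total += term
    return total

def u(n):
    return n**3

fits = [lagrange_eval([u(n) for n in range(1, k + 1)], k + 1) for k in range(1, 4)]
print(sum(fits))  # 1 + 15 + 58 = 74, matching the cubic example in the problem statement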
'''simple docstring'''
def lowerCamelCase_ ( lowercase__ , lowercase__ , lowercase__):
return round(float(moles / volume) * nfactor)
def lowerCamelCase_ ( lowercase__ , lowercase__ , lowercase__):
return round(float((moles * 0.0_821 * temperature) / (volume)))
def lowerCamelCase_ ( lowercase__ , lowercase__ , lowercase__):
return round(float((moles * 0.0_821 * temperature) / (pressure)))
def lowerCamelCase_ ( lowercase__ , lowercase__ , lowercase__):
return round(float((pressure * volume) / (0.0_821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 187 | 0 |
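All four helpers rearrange the ideal gas law PV = nRT with R taken as 0.0821 L*atm/(mol*K). One worked number to tie the rearrangements together (the quantities are arbitrary examples):

R = 0.0821  # L*atm / (mol*K)
moles, temperature, volume = 2.0, 300.0, 10.0
pressure = moles * R * temperature / volume             # atm
print(round(pressure, 3))                               # 4.926
print(round(pressure * volume / (R * temperature), 3))  # recovers the 2.0 moles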
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case )
class UpperCAmelCase_ ( snake_case ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
UpperCamelCase =field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
UpperCamelCase =Features({"text": Value("string" )} )
UpperCamelCase =Features({"labels": ClassLabel} )
UpperCamelCase ="text"
UpperCamelCase ="labels"
def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[Any]:
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , UpperCamelCase_ ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
__lowercase : int = copy.deepcopy(self )
__lowercase : Union[str, Any] = self.label_schema.copy()
__lowercase : int = features[self.label_column]
__lowercase : List[str] = label_schema
return task_template
@property
def _lowerCamelCase ( self ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
| 76 |
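Because the task template is a frozen dataclass, align_with_features has to return a modified deep copy rather than mutate in place. The same pattern in plain stdlib terms (the class and values here are illustrative, not the datasets API):

import dataclasses
from dataclasses import dataclass, field

@dataclass(frozen=True)
class Template:
    task: str = "text-classification"
    label_schema: dict = field(default_factory=dict)

t = Template(label_schema={"labels": None})
# frozen instances reject attribute assignment, so derive an updated copy instead:
t2 = dataclasses.replace(t, label_schema={**t.label_schema, "labels": ["neg", "pos"]})
print(t2.label_schema)  # {'labels': ['neg', 'pos']}; t is left untouched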
"""simple docstring"""
a_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def __UpperCAmelCase ( __UpperCamelCase ):
# Make sure the supplied data is a bytes-like object
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
__lowercase : str = f"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(__UpperCamelCase )
__lowercase : Any = ''''''.join(bin(__UpperCamelCase )[2:].zfill(8 ) for byte in data )
__lowercase : List[str] = len(__UpperCamelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
__lowercase : int = B'''=''' * ((6 - len(__UpperCamelCase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__UpperCamelCase ) % 6)
else:
__lowercase : Any = B''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(__UpperCamelCase ) , 6 ) ).encode()
+ padding
)
def __UpperCAmelCase ( __UpperCamelCase ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__UpperCamelCase , __UpperCamelCase ) and not isinstance(__UpperCamelCase , __UpperCamelCase ):
__lowercase : List[str] = (
'''argument should be a bytes-like object or ASCII string, '''
f"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(__UpperCamelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__UpperCamelCase , __UpperCamelCase ):
try:
__lowercase : List[str] = encoded_data.decode('''utf-8''' )
except UnicodeDecodeError:
raise ValueError('''base64 encoded data should only contain ASCII characters''' )
__lowercase : Dict = encoded_data.count('''=''' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__lowercase : Tuple = encoded_data[:-padding]
__lowercase : str = ''''''.join(
bin(B64_CHARSET.index(__UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__lowercase : Any = ''''''.join(
bin(B64_CHARSET.index(__UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )
__lowercase : int = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(__UpperCamelCase ) , 8 )
]
return bytes(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 76 | 1 |
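The hand-rolled codec above follows the same 6-bit grouping and '=' padding rules as the standard library, so it can be cross-checked against base64 directly:

import base64

data = b"Python"
encoded = base64.b64encode(data)
print(encoded)                            # b'UHl0aG9u'
print(base64.b64decode(encoded) == data)  # True: the encoding round-trips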
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowercase : int =2
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : List[Any] , *, # begin keyword-only arguments
SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : int=None , ) -> List[Any]:
A : Optional[Any] =bos, unk, pad, eos
A : Dict =[]
A : Union[str, Any] =[]
A : Any ={}
A : int =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : Any =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[Any] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =self.add_symbol(SCREAMING_SNAKE_CASE__ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(SCREAMING_SNAKE_CASE__ )
A : List[str] =len(self.symbols )
def __eq__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
return self.indices == other.indices
def __getitem__( self : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : List[Any] ) -> Union[str, Any]:
return len(self.symbols )
def __contains__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
return sym in self.indices
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
A : Union[str, Any] =cls()
d.add_from_file(SCREAMING_SNAKE_CASE__ )
return d
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> Any:
if word in self.indices and not overwrite:
A : int =self.indices[word]
A : Union[str, Any] =self.count[idx] + n
return idx
else:
A : Tuple =len(self.symbols )
A : str =idx
self.symbols.append(SCREAMING_SNAKE_CASE__ )
self.count.append(SCREAMING_SNAKE_CASE__ )
return idx
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
return 0
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
try:
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(fd )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(SCREAMING_SNAKE_CASE__ ) )
return
A : str =f.readlines()
A : int =self._load_meta(SCREAMING_SNAKE_CASE__ )
for line in lines[indices_start_line:]:
try:
A : Optional[int] =line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
A : int =True
A : Optional[Any] =line.rsplit(' ' , 1 )
else:
A : Any =False
A : Tuple =int(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(SCREAMING_SNAKE_CASE__ ) )
self.add_symbol(SCREAMING_SNAKE_CASE__ , n=SCREAMING_SNAKE_CASE__ , overwrite=SCREAMING_SNAKE_CASE__ )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def A__ ( lowercase: Union[str, Any] ) -> str:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
A : int =dict((re.sub(r'@@$', '', k ), v) if k.endswith('@@' ) else (re.sub(r'$', '</w>', k ), v) for k, v in d.items() )
A : int ='<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
A : List[Any] =d[k] # restore
return da
def A__ ( lowercase: Optional[int], lowercase: Optional[Any] ) -> str:
# prep
if not os.path.exists(lowercase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowercase, exist_ok=lowercase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
A : List[str] =os.path.join(lowercase, 'checkpoint.pt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
A : Optional[Any] =torch.load(lowercase, map_location='cpu' )
A : Any =chkpt['cfg']['model']
# dicts
A : Any =os.path.join(lowercase, 'dict.txt' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
A : Dict =Dictionary.load(lowercase )
A : Optional[Any] =rewrite_dict_keys(src_dict.indices )
A : Tuple =len(lowercase )
A : Any =os.path.join(lowercase, VOCAB_FILES_NAMES['vocab_file'] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# merges_file (bpecodes)
A : List[str] =os.path.join(lowercase, 'bpecodes' )
if not os.path.isfile(lowercase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
A : List[str] =os.path.join(lowercase, VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(lowercase, lowercase )
# model config
A : Tuple =os.path.join(lowercase, 'config.json' )
A : Tuple ={
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1e-1_2,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# tokenizer config
A : int =os.path.join(lowercase, lowercase )
A : List[str] ={
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowercase, ensure_ascii=lowercase, indent=lowercase ) )
# model
A : List[Any] =chkpt['model']
# remove unneeded keys
A : List[Any] =[
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(lowercase, lowercase )
A : str =list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
A : Union[str, Any] =model_state_dict.pop(lowercase )
else:
A : List[str] =model_state_dict.pop(lowercase )
A : Any =BioGptConfig.from_pretrained(lowercase )
A : str =BioGptForCausalLM(lowercase )
# check that it loads ok
model_new.load_state_dict(lowercase )
# save
A : Tuple =os.path.join(lowercase, lowercase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowercase, lowercase )
print('Conversion is done!' )
if __name__ == "__main__":
_lowercase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase : List[Any] =parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
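The dictionary rewrite step mirrors the mapping sketched in the rewrite_dict_keys comment: strip fairseq's '@@' continuation marker and tag word-final tokens with '</w>'. In isolation:

import re

d = {"le@@": 5, "tt@@": 6, "er": 7}
rewritten = {
    (re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v for k, v in d.items()
}
print(rewritten)  # {'le': 5, 'tt': 6, 'er</w>': 7}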
| 702 |
import math
def A__ ( lowercase: int ) -> list:
A : Optional[Any] =[True] * n
A : Tuple =False
A : List[Any] =False
A : Dict =True
for i in range(3, int(n**0.5 + 1 ), 2 ):
A : Dict =i * 2
while index < n:
A : Dict =False
A : Dict =index + i
A : Tuple =[2]
for i in range(3, lowercase, 2 ):
if is_prime[i]:
primes.append(lowercase )
return primes
def A__ ( lowercase: int = 999_966_663_333 ) -> int:
A : Optional[int] =math.floor(math.sqrt(lowercase ) ) + 100
A : Optional[int] =prime_sieve(lowercase )
A : Optional[Any] =0
A : List[Any] =0
A : Union[str, Any] =primes[prime_index]
while (last_prime**2) <= limit:
A : Tuple =primes[prime_index + 1]
A : Optional[int] =last_prime**2
A : Tuple =next_prime**2
# Get numbers divisible by lps(current)
A : int =lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
A : List[Any] =upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
A : Any =0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
A : List[str] =next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 661 | 0 |
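prime_sieve above is an odd-only sieve of Eratosthenes. An equivalent plain boolean sieve, handy for checking its output on small bounds:

def primes_below(n):
    # Plain boolean sieve; same output as the odd-only version above.
    sieve = [True] * n
    sieve[0:2] = [False, False]
    for i in range(2, int(n**0.5) + 1):
        if sieve[i]:
            sieve[i * i : n : i] = [False] * len(range(i * i, n, i))
    return [i for i, p in enumerate(sieve) if p]

print(primes_below(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]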
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase ) -> bool:
"""simple docstring"""
if p < 2:
raise ValueError("p should not be less than 2!" )
elif p == 2:
return True
__UpperCAmelCase : List[Any] = 4
__UpperCAmelCase : Union[str, Any] = (1 << p) - 1
for _ in range(p - 2 ):
__UpperCAmelCase : str = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 77 |
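The test above is Lucas-Lehmer: for an odd prime p, M_p = 2**p - 1 is prime exactly when p - 2 iterations of s -> s**2 - 2 (mod M_p), starting from s = 4, end at 0. Re-implemented inline so the scan runs standalone:

def lucas_lehmer(p):
    if p == 2:
        return True
    s, m = 4, (1 << p) - 1
    for _ in range(p - 2):
        s = (s * s - 2) % m
    return s == 0

print([p for p in (2, 3, 5, 7, 11, 13, 17, 19) if lucas_lehmer(p)])
# [2, 3, 5, 7, 13, 17, 19] -- 11 drops out because 2047 = 23 * 89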
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
a_ = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
a_ = {
'ctrl': 2_5_6,
}
a_ = {
'Pregnancy': 1_6_8_6_2_9,
'Christianity': 7_6_7_5,
'Explain': 1_0_6_4_2_3,
'Fitness': 6_3_4_4_0,
'Saving': 6_3_1_6_3,
'Ask': 2_7_1_7_1,
'Ass': 9_5_9_8_5,
'Joke': 1_6_3_5_0_9,
'Questions': 4_5_6_2_2,
'Thoughts': 4_9_6_0_5,
'Retail': 5_2_3_4_2,
'Feminism': 1_6_4_3_3_8,
'Writing': 1_1_9_9_2,
'Atheism': 1_9_2_2_6_3,
'Netflix': 4_8_6_1_6,
'Computing': 3_9_6_3_9,
'Opinion': 4_3_2_1_3,
'Alone': 4_4_9_6_7,
'Funny': 5_8_9_1_7,
'Gaming': 4_0_3_5_8,
'Human': 4_0_8_8,
'India': 1_3_3_1,
'Joker': 7_7_1_3_8,
'Diet': 3_6_2_0_6,
'Legal': 1_1_8_5_9,
'Norman': 4_9_3_9,
'Tip': 7_2_6_8_9,
'Weight': 5_2_3_4_3,
'Movies': 4_6_2_7_3,
'Running': 2_3_4_2_5,
'Science': 2_0_9_0,
'Horror': 3_7_7_9_3,
'Confession': 6_0_5_7_2,
'Finance': 1_2_2_5_0,
'Politics': 1_6_3_6_0,
'Scary': 1_9_1_9_8_5,
'Support': 1_2_6_5_4,
'Technologies': 3_2_5_1_6,
'Teenage': 6_6_1_6_0,
'Event': 3_2_7_6_9,
'Learned': 6_7_4_6_0,
'Notion': 1_8_2_7_7_0,
'Wikipedia': 3_7_5_8_3,
'Books': 6_6_6_5,
'Extract': 7_6_0_5_0,
'Confessions': 1_0_2_7_0_1,
'Conspiracy': 7_5_9_3_2,
'Links': 6_3_6_7_4,
'Narcissus': 1_5_0_4_2_5,
'Relationship': 5_4_7_6_6,
'Relationships': 1_3_4_7_9_6,
'Reviews': 4_1_6_7_1,
'News': 4_2_5_6,
'Translation': 2_6_8_2_0,
'multilingual': 1_2_8_4_0_6,
}
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : Any = set()
__lowercase : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowercase : Any = char
__lowercase : List[Any] = set(__UpperCamelCase )
return pairs
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =VOCAB_FILES_NAMES
UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase =CONTROL_CODES
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="<unk>" , **UpperCamelCase_ ) -> int:
super().__init__(unk_token=UpperCamelCase_ , **UpperCamelCase_ )
with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle:
__lowercase : List[Any] = json.load(UpperCamelCase_ )
__lowercase : Any = {v: k for k, v in self.encoder.items()}
with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle:
__lowercase : Optional[Any] = merges_handle.read().split('''\n''' )[1:-1]
__lowercase : Optional[Any] = [tuple(merge.split() ) for merge in merges]
__lowercase : Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowercase : Optional[Any] = {}
@property
def _lowerCamelCase ( self ) -> Union[str, Any]:
return len(self.encoder )
def _lowerCamelCase ( self ) -> Tuple:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> str:
if token in self.cache:
return self.cache[token]
__lowercase : str = tuple(UpperCamelCase_ )
__lowercase : str = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
__lowercase : Optional[Any] = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
__lowercase : Dict = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase ,__lowercase : Tuple = bigram
__lowercase : int = []
__lowercase : Union[str, Any] = 0
while i < len(UpperCamelCase_ ):
try:
__lowercase : Optional[int] = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase : Tuple = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase : List[str] = tuple(UpperCamelCase_ )
__lowercase : str = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
__lowercase : List[str] = get_pairs(UpperCamelCase_ )
__lowercase : Optional[Any] = '''@@ '''.join(UpperCamelCase_ )
__lowercase : Dict = word[:-4]
__lowercase : str = word
return word
def _lowerCamelCase ( self , UpperCamelCase_ ) -> str:
__lowercase : List[Any] = []
__lowercase : int = re.findall(R'''\S+\n?''' , UpperCamelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(UpperCamelCase_ ).split(''' ''' ) ) )
return split_tokens
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[Any]:
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> int:
return self.decoder.get(UpperCamelCase_ , self.unk_token )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[int]:
__lowercase : Tuple = ''' '''.join(UpperCamelCase_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase : Optional[Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '''\n''' )
__lowercase : List[str] = 0
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
__lowercase : Union[str, Any] = token_index
writer.write(''' '''.join(UpperCamelCase_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 76 | 0 |
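The bpe method above repeatedly merges the lowest-ranked adjacent symbol pair until no ranked pair remains. The core merge step in miniature (the two-rule merge table is made up):

def bpe_once(word, ranks):
    # One pass: find the best-ranked adjacent pair and merge every occurrence.
    pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
    candidates = [p for p in pairs if p in ranks]
    if not candidates:
        return word
    first, second = min(candidates, key=ranks.get)
    out, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
            out.append(first + second)
            i += 2
        else:
            out.append(word[i])
            i += 1
    return out

ranks = {("l", "o"): 0, ("lo", "w"): 1}
word = ["l", "o", "w"]
word = bpe_once(bpe_once(word, ranks), ranks)
print(word)  # ['low']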
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( UpperCAmelCase__ ):
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> Any:
super().__init__()
if hasattr(scheduler.config , 'steps_offset' ) and scheduler.config.steps_offset != 1:
__a = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"""to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate('steps_offset!=1' , '1.0.0' , UpperCamelCase , standard_warn=UpperCamelCase )
__a = dict(scheduler.config )
__a = 1
__a = FrozenDict(UpperCamelCase )
if hasattr(scheduler.config , 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
__a = (
f"The configuration file of this scheduler: {scheduler} has not set the configuration"
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate('skip_prk_steps not set' , '1.0.0' , UpperCamelCase , standard_warn=UpperCamelCase )
__a = dict(scheduler.config )
__a = True
__a = FrozenDict(UpperCamelCase )
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
segmentation_model=UpperCamelCase , segmentation_processor=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , unet=UpperCamelCase , scheduler=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=UpperCamelCase , )
def UpperCamelCase__ ( self , UpperCamelCase = "auto" ) -> Union[str, Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase )
def UpperCamelCase__ ( self ) -> List[Any]:
self.enable_attention_slicing(UpperCamelCase )
def UpperCamelCase__ ( self ) -> List[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__a = torch.device('cuda' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase , UpperCamelCase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase__ ( self ) -> Union[str, Any]:
if self.device != torch.device('meta' ) or not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = 512 , UpperCamelCase = 512 , UpperCamelCase = 50 , UpperCamelCase = 7.5 , UpperCamelCase = None , UpperCamelCase = 1 , UpperCamelCase = 0.0 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = "pil" , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = 1 , **UpperCamelCase , ) -> Optional[Any]:
__a = self.segmentation_processor(
text=[text] , images=[image] , padding='max_length' , return_tensors='pt' ).to(self.device )
__a = self.segmentation_model(**UpperCamelCase )
__a = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__a = self.numpy_to_pil(UpperCamelCase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__a = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=UpperCamelCase , image=UpperCamelCase , mask_image=UpperCamelCase , height=UpperCamelCase , width=UpperCamelCase , num_inference_steps=UpperCamelCase , guidance_scale=UpperCamelCase , negative_prompt=UpperCamelCase , num_images_per_prompt=UpperCamelCase , eta=UpperCamelCase , generator=UpperCamelCase , latents=UpperCamelCase , output_type=UpperCamelCase , return_dict=UpperCamelCase , callback=UpperCamelCase , callback_steps=UpperCamelCase , )
| 714 |
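The pipeline above derives the inpainting mask by passing CLIPSeg logits through a sigmoid and converting to a PIL image. That step on its own (the shape and values are synthetic stand-ins):

import numpy as np
import torch
from PIL import Image

logits = torch.randn(352, 352)                  # fake segmentation logits
probs = torch.sigmoid(logits).cpu().numpy()     # per-pixel foreground probability
mask = Image.fromarray((probs * 255).astype(np.uint8)).convert("L")
print(mask.size, mask.mode)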
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase_ = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
UpperCAmelCase_ = {
"junnyu/roformer_chinese_small": 15_36,
"junnyu/roformer_chinese_base": 15_36,
"junnyu/roformer_chinese_char_small": 5_12,
"junnyu/roformer_chinese_char_base": 5_12,
"junnyu/roformer_small_discriminator": 1_28,
"junnyu/roformer_small_generator": 1_28,
}
UpperCAmelCase_ = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class __lowercase ( __magic_name__ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = PRETRAINED_INIT_CONFIGURATION
_a = RoFormerTokenizer
def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase="[UNK]" , UpperCamelCase="[SEP]" , UpperCamelCase="[PAD]" , UpperCamelCase="[CLS]" , UpperCamelCase="[MASK]" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ) -> int:
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
__a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or pre_tok_state.get('strip_accents' , UpperCamelCase ) != strip_accents
):
__a = getattr(UpperCamelCase , pre_tok_state.pop('type' ) )
__a = do_lower_case
__a = strip_accents
__a = pre_tok_class(**UpperCamelCase )
__a = do_lower_case
def __getstate__( self ) -> List[Any]:
__a = self.__dict__.copy()
__a = BertPreTokenizer()
return state
def __setstate__( self , UpperCamelCase ) -> Optional[int]:
__a = d
__a = self.__dict__['_tokenizer'].get_vocab()
__a = PreTokenizer.custom(JiebaPreTokenizer(UpperCamelCase ) )
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase=None ) -> int:
__a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]:
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]:
__a = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=False , **UpperCamelCase , ) -> List[Any]:
__a = BertPreTokenizer()
return super().save_pretrained(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase )
| 490 | 0 |
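The __getstate__/__setstate__ pair above exists because the custom Jieba pre-tokenizer wrapper is not picklable, so a stock BertPreTokenizer is substituted before pickling and the custom one rebuilt afterwards. The generic pattern (the Lock is just a stand-in for any unpicklable member):

import pickle
import threading

class Holder:
    def __init__(self):
        self.lock = threading.Lock()      # stand-in for an unpicklable member

    def __getstate__(self):
        state = self.__dict__.copy()
        del state["lock"]                 # drop the unpicklable part
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self.lock = threading.Lock()      # rebuild it on load

h = pickle.loads(pickle.dumps(Holder()))
print(isinstance(h.lock, type(threading.Lock())))  # True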
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
lowerCamelCase_ : int = SwinConfig(image_size=1_92 )
if "base" in model_name:
lowerCamelCase_ : Dict = 6
lowerCamelCase_ : Any = 1_28
lowerCamelCase_ : List[Any] = (2, 2, 18, 2)
lowerCamelCase_ : Dict = (4, 8, 16, 32)
elif "large" in model_name:
lowerCamelCase_ : List[Any] = 12
lowerCamelCase_ : List[Any] = 1_92
lowerCamelCase_ : Optional[Any] = (2, 2, 18, 2)
lowerCamelCase_ : List[Any] = (6, 12, 24, 48)
else:
raise ValueError('Model not supported, only supports base and large variants' )
lowerCamelCase_ : str = window_size
lowerCamelCase_ : Dict = embed_dim
lowerCamelCase_ : List[str] = depths
lowerCamelCase_ : Tuple = num_heads
return config
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
if "encoder.mask_token" in name:
lowerCamelCase_ : List[str] = name.replace('encoder.mask_token' ,'embeddings.mask_token' )
if "encoder.patch_embed.proj" in name:
lowerCamelCase_ : Optional[int] = name.replace('encoder.patch_embed.proj' ,'embeddings.patch_embeddings.projection' )
if "encoder.patch_embed.norm" in name:
lowerCamelCase_ : Optional[int] = name.replace('encoder.patch_embed.norm' ,'embeddings.norm' )
if "attn.proj" in name:
lowerCamelCase_ : Union[str, Any] = name.replace('attn.proj' ,'attention.output.dense' )
if "attn" in name:
lowerCamelCase_ : Union[str, Any] = name.replace('attn' ,'attention.self' )
if "norm1" in name:
lowerCamelCase_ : Optional[int] = name.replace('norm1' ,'layernorm_before' )
if "norm2" in name:
lowerCamelCase_ : Any = name.replace('norm2' ,'layernorm_after' )
if "mlp.fc1" in name:
lowerCamelCase_ : List[str] = name.replace('mlp.fc1' ,'intermediate.dense' )
if "mlp.fc2" in name:
lowerCamelCase_ : str = name.replace('mlp.fc2' ,'output.dense' )
if name == "encoder.norm.weight":
lowerCamelCase_ : List[Any] = 'layernorm.weight'
if name == "encoder.norm.bias":
lowerCamelCase_ : Optional[Any] = 'layernorm.bias'
if "decoder" in name:
pass
else:
lowerCamelCase_ : Any = 'swin.' + name
return name
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ ):
for key in orig_state_dict.copy().keys():
lowerCamelCase_ : Tuple = orig_state_dict.pop(lowerCAmelCase__ )
if "attn_mask" in key:
pass
elif "qkv" in key:
lowerCamelCase_ : Any = key.split('.' )
lowerCamelCase_ : int = int(key_split[2] )
lowerCamelCase_ : Optional[Any] = int(key_split[4] )
lowerCamelCase_ : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCamelCase_ : Optional[int] = val[:dim, :]
lowerCamelCase_ : str = val[
dim : dim * 2, :
]
lowerCamelCase_ : Optional[int] = val[-dim:, :]
else:
lowerCamelCase_ : Tuple = val[
:dim
]
lowerCamelCase_ : List[str] = val[
dim : dim * 2
]
lowerCamelCase_ : Dict = val[
-dim:
]
else:
lowerCamelCase_ : List[str] = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ : Optional[Any] = torch.load(lowerCAmelCase__ ,map_location='cpu' )['model']
lowerCamelCase_ : Tuple = get_swin_config(lowerCAmelCase__ )
lowerCamelCase_ : Optional[Any] = SwinForMaskedImageModeling(lowerCAmelCase__ )
model.eval()
lowerCamelCase_ : int = convert_state_dict(lowerCAmelCase__ ,lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
lowerCamelCase_ : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCamelCase_ : Any = ViTImageProcessor(size={'height': 1_92, 'width': 1_92} )
lowerCamelCase_ : int = Image.open(requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ).raw )
lowerCamelCase_ : Optional[int] = image_processor(images=lowerCAmelCase__ ,return_tensors='pt' )
with torch.no_grad():
lowerCamelCase_ : List[Any] = model(**lowerCAmelCase__ ).logits
print(outputs.shape )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(F"Pushing model and image processor for {model_name} to hub" )
model.push_to_hub(F"microsoft/{model_name}" )
image_processor.push_to_hub(F"microsoft/{model_name}" )
if __name__ == "__main__":
_lowercase : Optional[int] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowercase : int =parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 364 |
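The conversion splits the fused qkv projection from the original checkpoint into the separate query, key and value tensors the HF model expects. The slicing in isolation (dim is an arbitrary example size):

import torch

dim = 8
qkv_weight = torch.randn(3 * dim, dim)   # fused [q; k; v] projection
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
print(torch.equal(torch.cat([q, k, v]), qkv_weight))  # True: a clean 3-way split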
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
_lowercase : List[str] =5_0000
_lowercase : str =5000
_lowercase , _lowercase : List[str] =os.path.split(__file__)
_lowercase : Union[str, Any] =os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
lowerCamelCase_ : Tuple = dataset[i]
@get_duration
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
for i in range(0 ,len(lowerCAmelCase__ ) ,lowerCAmelCase__ ):
lowerCamelCase_ : Tuple = dataset[i : i + batch_size]
@get_duration
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
lowerCamelCase_ : Tuple = dataset[i]
@get_duration
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(0 ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ : Optional[int] = dataset[i : i + batch_size]
def _SCREAMING_SNAKE_CASE ( ):
lowerCamelCase_ : Any = {'num examples': SPEED_TEST_N_EXAMPLES}
lowerCamelCase_ : Optional[Any] = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_00}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10_00}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
(read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
(read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10_00}),
]
lowerCamelCase_ : Optional[Any] = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_00}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10_00}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10_00}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('generating dataset' )
lowerCamelCase_ : List[Any] = datasets.Features(
{'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} )
lowerCamelCase_ : Union[str, Any] = generate_example_dataset(
os.path.join(lowerCAmelCase__ ,'dataset.arrow' ) ,lowerCAmelCase__ ,num_examples=lowerCAmelCase__ ,seq_shapes={'list': (1_00,)} ,)
print('first set of iterations' )
for func, kwargs in functions:
print(func.__name__ ,str(lowerCAmelCase__ ) )
lowerCamelCase_ : Optional[int] = func(lowerCAmelCase__ ,**lowerCAmelCase__ )
print('shuffling dataset' )
lowerCamelCase_ : int = dataset.shuffle()
print('Second set of iterations (after shuffling)' )
for func, kwargs in functions_shuffled:
print('shuffled ' ,func.__name__ ,str(lowerCAmelCase__ ) )
lowerCamelCase_ : Optional[Any] = func(
lowerCAmelCase__ ,**lowerCAmelCase__ )
with open(lowerCAmelCase__ ,'wb' ) as f:
f.write(json.dumps(lowerCAmelCase__ ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 364 | 1 |
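get_duration, imported from utils above, is a timing decorator; a minimal stand-in built on time.perf_counter, in case the benchmark loop needs to run without that helper:

import functools
import time

def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start  # elapsed seconds instead of the result
    return wrapper

@get_duration
def busy(n):
    sum(range(n))

print(f"{busy(1_000_000):.4f}s")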
'''simple docstring'''
def snake_case ( a_ : str ) -> str:
"""simple docstring"""
UpperCamelCase_ : int = 0
# if input_string is "aba" than new_input_string become "a|b|a"
UpperCamelCase_ : Any = ''''''
UpperCamelCase_ : str = ''''''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(a_ ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
UpperCamelCase_ : int = 0, 0
# length[i] shows the length of palindromic substring with center i
UpperCamelCase_ : str = [1 for i in range(len(a_ ) )]
# for each character in new_string find corresponding palindromic string
UpperCamelCase_ : Optional[Any] = 0
for j in range(len(a_ ) ):
UpperCamelCase_ : str = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(a_ )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
UpperCamelCase_ : Dict = 2 * k - 1
# does this palindrome end after the previously explored end (that is, r)?
# if yes, update r to the last index of this palindrome
if j + k - 1 > r:
UpperCamelCase_ : Tuple = j - k + 1 # noqa: E741
UpperCamelCase_ : List[str] = j + k - 1
# update max_length and start position
if max_length < length[j]:
UpperCamelCase_ : Optional[int] = length[j]
UpperCamelCase_ : List[str] = j
# create that string
UpperCamelCase_ : Optional[Any] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
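The function above is Manacher's algorithm, which finds the longest palindromic substring in O(n). A quadratic brute force is useful for cross-checking its output on short strings:

def longest_palindrome_bruteforce(s):
    # Check every substring; fine for tests, far too slow for long inputs.
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            sub = s[i : j + 1]
            if sub == sub[::-1] and len(sub) > len(best):
                best = sub
    return best

print(longest_palindrome_bruteforce("abacabad"))  # 'abacaba'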
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=18 , __lowerCAmelCase=30 , __lowerCAmelCase=4_00 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , ):
UpperCamelCase_ : Dict = size if size is not None else {"""height""": 18, """width""": 18}
UpperCamelCase_ : List[str] = parent
UpperCamelCase_ : List[str] = batch_size
UpperCamelCase_ : List[Any] = num_channels
UpperCamelCase_ : List[str] = image_size
UpperCamelCase_ : Tuple = min_resolution
UpperCamelCase_ : List[str] = max_resolution
UpperCamelCase_ : Optional[int] = do_resize
UpperCamelCase_ : List[Any] = size
UpperCamelCase_ : Any = apply_ocr
def _UpperCAmelCase ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
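    # Added note: the from_dict(..., size=42) case above exercises the kwargs-override
    # path, where a bare int is normalized into a square {"height": 42, "width": 42} dict.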
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase_ : Optional[Any] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
UpperCamelCase_ : List[Any] = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 
6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __lowerCAmelCase )
self.assertListEqual(encoding.boxes , __lowerCAmelCase )
# with apply_OCR = False
UpperCamelCase_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=__lowerCAmelCase )
UpperCamelCase_ : Optional[int] = image_processing(__lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
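        # Added note: with apply_ocr=True (the default) the processor runs Tesseract and
        # returns `words` and `boxes` alongside `pixel_values`; with apply_ocr=False, as in
        # the final block above, only `pixel_values` is produced, so the shape check is all
        # that applies.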
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made up of the same letters but arranged
    differently (ignoring case and whitespace).

    >>> check_anagrams('Silent', 'Listen')
    True
    >>> check_anagrams('There', 'Their')
    False
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each position, increment the count for the character from the first
    # string and decrement it for the character from the second string
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
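# Worked example (added for illustration): "Silent" vs "Listen" nets every
# letter's count to 0, so the result is True; "There" vs "Their" leaves
# count['e'] == 1 and count['i'] == -1, so the result is False.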
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        # e.g. a random box (x0=5, y0=9, x1=2, y1=4) is rewritten to (2, 4, 5, 9)
        # so that x0 <= x1 and y0 <= y1, as the model expects

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
        return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
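        # Added note: the expected_slice covers the first three hidden dimensions of
        # both input tokens; the values were captured from the released
        # SCUT-DLVCLab/lilt-roberta-en-base weights, so any regression in the
        # checkpoint or model code surfaces as an allclose failure at atol=1e-3.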
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        return ByTaTokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
                # only keep IDs whose decoding succeeded
                toks.append((i, tok))
            except UnicodeDecodeError:
                pass

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
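    # Added note: lone bytes >= 0x80 are not valid UTF-8 by themselves --
    # bytes([128]).decode("utf-8") raises UnicodeDecodeError -- which is why the
    # helper above can only keep IDs that survive tokenizer.decode.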
    def test_eos_treatment(self):
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self):
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
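        # Worked example (an inference from the expected IDs above): ByT5 maps each
        # UTF-8 byte b to ID b + 3, reserving 0/1/2 for pad/</s>/<unk>, so
        # "e" (0x65 = 101) -> 104, " " (0x20 = 32) -> 35, and the 3-byte "€"
        # (0xE2 0x82 0xAC) -> 229, 133, 175; the trailing 1 is </s>.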
    def test_prepare_batch_integration(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
def _UpperCAmelCase ( self : List[Any] ):
# safety check on max_len default value so we are sure the test works
A__ : List[str] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
A__ : List[Any] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
A__ : Tuple =tempfile.mkdtemp()
A__ : List[str] =" He is very happy, UNwant\u00E9d,running"
A__ : str =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
A__ : Optional[Any] =tokenizer.__class__.from_pretrained(UpperCamelCase__ )
A__ : str =after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
shutil.rmtree(UpperCamelCase__ )
A__ : int =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
A__ : str =tempfile.mkdtemp()
A__ : Optional[Any] =" He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
A__ : Dict =tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
A__ : Tuple =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
A__ : List[str] =tokenizer.__class__.from_pretrained(UpperCamelCase__ )
A__ : List[str] =after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
A__ : List[str] =tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase__ )
def _UpperCAmelCase ( self : Dict ):
A__ : List[str] =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
A__ : List[Any] =json.load(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
A__ : Tuple =json.load(UpperCamelCase__ )
A__ : int =[F'''<extra_id_{i}>''' for i in range(125 )]
A__ : int =added_tokens_extra_ids + [
"an_additional_special_token"
]
A__ : Optional[Any] =added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(UpperCamelCase__ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
A__ : Tuple =tokenizer_class.from_pretrained(
UpperCamelCase__ , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
A__ : Optional[int] =added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=UpperCamelCase__ )]
A__ : Union[str, Any] =tokenizer_class.from_pretrained(
UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def _UpperCAmelCase ( self : List[Any] ):
A__ : str =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
A__ : Optional[int] =tokenizer_class.from_pretrained(UpperCamelCase__ )
self.assertTrue(tokenizer.decode([255] ) == "" )
def _UpperCAmelCase ( self : List[Any] ):
pass
def _UpperCAmelCase ( self : Optional[Any] ):
pass
def _UpperCAmelCase ( self : Optional[int] ):
pass
def _UpperCAmelCase ( self : Optional[int] ):
pass
def _UpperCAmelCase ( self : Any ):
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
A__ : Union[str, Any] =self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
A__ : Tuple =["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
A__ : Optional[Any] =tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def _UpperCAmelCase ( self : List[str] ):
A__ : Union[str, Any] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
A__ : List[str] =[
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
A__ : List[Any] =0
A__ : List[Any] =tokenizer.convert_ids_to_tokens(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
for attr in attributes_list:
setattr(UpperCamelCase__ , attr + "_id" , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + "_id" ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , attr + "_id" , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + "_id" ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(UpperCamelCase__ , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(UpperCamelCase__ , "additional_special_tokens_ids" ) , [] )
setattr(UpperCamelCase__ , "additional_special_tokens_ids" , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , "additional_special_tokens" ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , "additional_special_tokens_ids" ) , [token_id_to_test_setters] )
"""Convert DPT checkpoints from the original repository. URL: https://github.com/isl-org/DPT"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        # e.g. "refinenet4" -> "fusion_stage.layers.0", "refinenet1" -> "fusion_stage.layers.3"
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")

    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
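# Shape sketch (added note): for the large config, in_proj_weight is (3 * 1024, 1024);
# rows [0:1024) become the query projection, [1024:2048) the key, and [2048:3072) the value.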
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """
    Copy/paste/tweak the original DPT weights into our DPT structure.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
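# Example invocation (illustrative; the script filename is an assumption):
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large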
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class lowerCamelCase :
def __init__( self , a_ , a_=13 , a_=64 , a_=3 , a_=4 , a_=[2, 2, 2, 2] , a_=[8, 4, 2, 1] , a_=[16, 32, 64, 128] , a_=[1, 4, 8, 16] , a_=[1, 2, 4, 8] , a_=True , a_=True , a_="gelu" , a_=0.1 , a_=0.1 , a_=0.02 , a_=3 , a_=None , ):
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : int = image_size
lowerCAmelCase : Tuple = num_channels
lowerCAmelCase : Union[str, Any] = num_encoder_blocks
lowerCAmelCase : Tuple = sr_ratios
lowerCAmelCase : Any = depths
lowerCAmelCase : List[Any] = hidden_sizes
lowerCAmelCase : List[str] = downsampling_rates
lowerCAmelCase : Dict = num_attention_heads
lowerCAmelCase : Any = is_training
lowerCAmelCase : Dict = use_labels
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : Dict = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : List[str] = num_labels
lowerCAmelCase : int = scope
def _lowerCamelCase ( self ):
lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : str = None
if self.use_labels:
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase : int = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self , a_ , a_ , a_ ):
lowerCAmelCase : Optional[int] = SegformerModel(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase : Tuple = model(a_ )
lowerCAmelCase : str = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _lowerCamelCase ( self , a_ , a_ , a_ ):
lowerCAmelCase : Dict = self.num_labels
lowerCAmelCase : Any = SegformerForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
lowerCAmelCase : Any = model(a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
lowerCAmelCase : Any = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _lowerCamelCase ( self , a_ , a_ , a_ ):
lowerCAmelCase : Optional[int] = 1
lowerCAmelCase : Union[str, Any] = SegformerForSemanticSegmentation(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase : Dict = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(a_ )
lowerCAmelCase : Optional[int] = model(a_ , labels=a_ )
self.parent.assertGreater(result.loss , 0.0 )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( _A , _A , unittest.TestCase ):
snake_case_ = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
def _lowerCamelCase ( self ):
lowerCAmelCase : Optional[Any] = SegformerModelTester(self )
lowerCAmelCase : Optional[int] = SegformerConfigTester(self , config_class=a_ )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def _lowerCamelCase ( self ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*a_ )
def _lowerCamelCase ( self ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*a_ )
@unittest.skip("SegFormer does not use inputs_embeds" )
def _lowerCamelCase ( self ):
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Optional[int] = model_class(a_ )
lowerCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : List[str] = True
for model_class in self.all_model_classes:
lowerCAmelCase : Any = True
lowerCAmelCase : str = False
lowerCAmelCase : Dict = True
lowerCAmelCase : Dict = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(a_ , a_ ) )
lowerCAmelCase : Optional[Any] = outputs.attentions
lowerCAmelCase : List[Any] = sum(self.model_tester.depths )
self.assertEqual(len(a_ ) , a_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : Optional[int] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCAmelCase : int = model(**self._prepare_for_class(a_ , a_ ) )
lowerCAmelCase : Optional[int] = outputs.attentions
self.assertEqual(len(a_ ) , a_ )
# verify the first attentions (first block, first layer)
lowerCAmelCase : List[Any] = (self.model_tester.image_size // 4) ** 2
lowerCAmelCase : List[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
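        # Arithmetic check (added note): with the tester's image_size=64, the first
        # block attends over (64 // 4) ** 2 = 256 positions, reduced by sr_ratio=8
        # to (64 // (4 * 8)) ** 2 = 4 key positions.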
# verify the last attentions (last block, last layer)
lowerCAmelCase : Dict = (self.model_tester.image_size // 32) ** 2
lowerCAmelCase : Dict = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
lowerCAmelCase : str = len(a_ )
# Check attention is always last and order is fine
lowerCAmelCase : Any = True
lowerCAmelCase : List[Any] = True
lowerCAmelCase : List[Any] = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Any = model(**self._prepare_for_class(a_ , a_ ) )
self.assertEqual(out_len + 1 , len(a_ ) )
lowerCAmelCase : int = outputs.attentions
self.assertEqual(len(a_ ) , a_ )
# verify the first attentions (first block, first layer)
lowerCAmelCase : Union[str, Any] = (self.model_tester.image_size // 4) ** 2
lowerCAmelCase : Optional[int] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # MODEL_MAPPING is the name used upstream for the base-model mapping; the
            # import is outside this fragment, so this is a best-effort reconstruction.
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
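    # Illustrative sketch (an assumption, not part of the upstream test): each entry of
    # `segmentation` is an (H, W) tensor of per-pixel class ids, so a label for the
    # first image could be read off roughly like this:
    #
    #     pred = segmentation[0]
    #     label = model.config.id2label[pred[0, 0].item()]  # class name at pixel (0, 0)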
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
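# For reference, `replace_in_files` above consumes marker blocks of roughly this shape
# inside the generated `to_replace_*.py` file (format inferred from the parser; the
# concrete path, marker line, and payload below are illustrative only):
#
#     # To replace in: "src/transformers/models/auto/configuration_auto.py"
#     # Below: "# Add configs here"
#     ("mymodel", "MyModelConfig"),
#     # End.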
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including `num` (sieve of Eratosthenes)."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start]:
            prime.append(start)

            # Mark multiples of start as composite
            for i in range(start * start, num + 1, start):
                sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j]:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
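# Worked example (sketch): prime_sieve(10) walks start = 2, 3 (since sqrt(10) ~ 3.16),
# crosses off 4, 6, 8, 10 and 9, then collects the remaining survivors -> [2, 3, 5, 7].
# Runtime is the classic O(n log log n) of the sieve of Eratosthenes.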
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
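# Note (explanatory, not part of the original file): swapping the module object in
# `sys.modules` for a `_LazyModule` means submodule imports such as
# `from transformers.models.lilt import LiltModel` only execute the heavy
# `modeling_lilt` import on first attribute access, keeping `import transformers` cheap.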
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path: str, strict: bool, opset: int):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=1_2, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
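# Example invocation (flags as defined by the parser above; the script filename and
# the saved-model path are assumptions for illustration):
#
#     python utils/check_tf_ops.py --saved_model_path /tmp/my_model/saved_model.pb --opset 12 --strict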
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 5_1_2,
"distilbert-base-uncased-distilled-squad": 5_1_2,
"distilbert-base-cased": 5_1_2,
"distilbert-base-cased-distilled-squad": 5_1_2,
"distilbert-base-german-cased": 5_1_2,
"distilbert-base-multilingual-cased": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
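# Illustrative token-type layout produced by `create_token_type_ids_from_sequences`
# above for a sentence pair (a sketch, not part of the original file):
#
#     tokens:   [CLS] seq_0 ... [SEP] seq_1 ... [SEP]
#     type ids:   0     0   ...   0     1   ...   1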
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
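# Minimal usage sketch (an illustration, not part of the original file; it assumes a
# `transformers` install where `DPRConfig` and `BartConfig` are importable -- DPR plus
# BART being the canonical question-encoder/generator pairing for RAG):
#
#     from transformers import BartConfig, DPRConfig
#
#     rag_config = RagConfig.from_question_encoder_generator_configs(
#         DPRConfig(), BartConfig(), n_docs=5, index_name="compressed"
#     )
#     assert rag_config.generator.model_type == "bart"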
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" MBART tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
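# Token layout note (explanatory, not part of the original file): mBART places the
# language code as a *suffix* rather than a prefix, i.e.
#
#     source: "X </s> src_lang_code"      target: "X </s> tgt_lang_code"
#
# which is exactly what `set_src_lang_special_tokens` / `set_tgt_lang_special_tokens`
# install above via `prefix_tokens = []` and `suffix_tokens = [eos, lang_code]`.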
"""simple docstring"""
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for _ in range(n)] for _ in range(n)]
    sol = [[0 for _ in range(n)] for _ in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
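# The DP above fills the classic matrix-chain recurrence
#     matrix[a][b] = min over a <= c < b of
#                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
# For the sample dimensions [30, 35, 15, 5, 10, 20, 25] (the CLRS textbook example)
# the expected output is 15125 operations with parenthesization ((A1(A2A3))((A4A5)A6)).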
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
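# Examples (behavior implied by the regexes above):
#     find_backend("    if not is_torch_available():")                            -> "torch"
#     find_backend("    if not is_tf_available() and not is_torch_available():")  -> "tf_and_torch"
#     find_backend("import torch")                                                -> None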
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
'''simple docstring'''
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence (1-indexed, starting at 2)."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        message = f"The input value of [n={number}] has to be > 0"
        raise ValueError(message)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
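# Sanity check (sketch): the recurrence above is s(n) = s(n-1)**2 - s(n-1) + 1,
# written here as (s(n-1) - 1) * s(n-1) + 1, so the sequence starts 2, 3, 7, 43, 1807, ...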
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
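# Expected fixture layout (inferred from the lookups below; values are illustrative):
#     {"en-ru": {"src": ["..."], "tgt": ["..."]}, "ru-en": {...}, ...}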
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : Union[str, Any] =self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Dict =config_and_inputs
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
lowerCamelCase_ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase_ = True
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int=False ):
'''simple docstring'''
lowercase : Optional[int] =super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
if return_labels:
if model_class in get_values(UpperCAmelCase__ ):
lowercase : Any =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ )
lowercase : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
return inputs_dict
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Tuple =AlbertModelTester(self )
lowercase : Optional[Any] =ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase : Tuple =type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : str =AlbertModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
lowercase : int =AlbertModel.from_pretrained('''albert-base-v2''' )
lowercase : Optional[int] =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase : Any =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase : Any =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0]
lowercase : int =torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCAmelCase__ )
lowercase : Union[str, Any] =torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
| 92 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int = 600851475143 ) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(__magic_name__ )
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # strip out the smallest remaining factor completely; the last factor
        # stripped this way is the largest prime factor of the original n
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
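# e.g. _lowerCAmelCase(13195) == 29 and _lowerCAmelCase() == 6857 (Project Euler #3)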
if __name__ == "__main__":
    print(f'''{_lowerCAmelCase() = }''')
| 92 | 1 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class a__ ( _snake_case ):
"""simple docstring"""
A__ : Union[str, Any] = ['''pixel_values''']
def __init__( self :Optional[int] , lowercase__ :bool = True , lowercase__ :Dict[str, int] = None , lowercase__ :PILImageResampling = PILImageResampling.BICUBIC , lowercase__ :bool = True , lowercase__ :Dict[str, int] = None , lowercase__ :bool = True , lowercase__ :Union[int, float] = 1 / 255 , lowercase__ :bool = True , lowercase__ :Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , lowercase__ :Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **lowercase__ :Dict , ):
super().__init__(**lowercase__ )
lowercase = size if size is not None else {'shortest_edge': 224}
lowercase = get_size_dict(lowercase__ , default_to_square=lowercase__ )
lowercase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowercase = get_size_dict(lowercase__ , param_name='crop_size' )
lowercase = do_resize
lowercase = size
lowercase = resample
lowercase = do_center_crop
lowercase = crop_size
lowercase = do_rescale
lowercase = rescale_factor
lowercase = do_normalize
lowercase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowercase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCAmelCase ( self :List[str] , lowercase__ :np.ndarray , lowercase__ :Dict[str, int] , lowercase__ :PILImageResampling = PILImageResampling.BICUBIC , lowercase__ :Optional[Union[str, ChannelDimension]] = None , **lowercase__ :Tuple , ):
lowercase = get_size_dict(lowercase__ , default_to_square=lowercase__ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
lowercase = int((256 / 224) * size['shortest_edge'] )
lowercase = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ )
lowercase = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
lowercase__ , size=(size_dict['height'], size_dict['width']) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCAmelCase ( self :Dict , lowercase__ :np.ndarray , lowercase__ :Dict[str, int] , lowercase__ :Optional[Union[str, ChannelDimension]] = None , **lowercase__ :Optional[int] , ):
lowercase = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(lowercase__ , size=(size['height'], size['width']) , data_format=lowercase__ , **lowercase__ )
def __UpperCAmelCase ( self :Optional[int] , lowercase__ :np.ndarray , lowercase__ :Union[int, float] , lowercase__ :Optional[Union[str, ChannelDimension]] = None , **lowercase__ :Dict , ):
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCAmelCase ( self :Optional[Any] , lowercase__ :np.ndarray , lowercase__ :Union[float, List[float]] , lowercase__ :Union[float, List[float]] , lowercase__ :Optional[Union[str, ChannelDimension]] = None , **lowercase__ :Union[str, Any] , ):
return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCAmelCase ( self :Dict , lowercase__ :ImageInput , lowercase__ :Optional[bool] = None , lowercase__ :Optional[Dict[str, int]] = None , lowercase__ :PILImageResampling = None , lowercase__ :Optional[bool] = None , lowercase__ :Optional[Dict[str, int]] = None , lowercase__ :Optional[bool] = None , lowercase__ :Optional[float] = None , lowercase__ :Optional[bool] = None , lowercase__ :Optional[Union[float, Iterable[float]]] = None , lowercase__ :Optional[Union[float, Iterable[float]]] = None , lowercase__ :Optional[TensorType] = None , lowercase__ :ChannelDimension = ChannelDimension.FIRST , **lowercase__ :Union[str, Any] , ):
lowercase = do_resize if do_resize is not None else self.do_resize
lowercase = resample if resample is not None else self.resample
lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase = do_rescale if do_rescale is not None else self.do_rescale
lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase = do_normalize if do_normalize is not None else self.do_normalize
lowercase = image_mean if image_mean is not None else self.image_mean
lowercase = image_std if image_std is not None else self.image_std
lowercase = size if size is not None else self.size
lowercase = get_size_dict(lowercase__ , default_to_square=lowercase__ )
lowercase = crop_size if crop_size is not None else self.crop_size
lowercase = get_size_dict(lowercase__ , param_name='crop_size' )
lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowercase = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
lowercase = [self.resize(lowercase__ , lowercase__ , lowercase__ ) for image in images]
if do_center_crop:
lowercase = [self.center_crop(lowercase__ , lowercase__ ) for image in images]
if do_rescale:
lowercase = [self.rescale(lowercase__ , lowercase__ ) for image in images]
if do_normalize:
lowercase = [self.normalize(lowercase__ , lowercase__ , lowercase__ ) for image in images]
lowercase = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
lowercase = {'pixel_values': images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
| 314 |
from collections import deque
from math import floor
from random import random
from time import time
class a__ :
"""simple docstring"""
def __init__( self :Dict ):
lowercase = {}
def __UpperCAmelCase ( self :Dict , lowercase__ :str , lowercase__ :Optional[Any] , lowercase__ :int=1 ):
if self.graph.get(lowercase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowercase = [[w, v]]
if not self.graph.get(lowercase__ ):
lowercase = []
def __UpperCAmelCase ( self :List[str] ):
return list(self.graph )
def __UpperCAmelCase ( self :Optional[Any] , lowercase__ :Any , lowercase__ :Dict ):
if self.graph.get(lowercase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase__ )
def __UpperCAmelCase ( self :str , lowercase__ :Optional[Any]=-2 , lowercase__ :Any=-1 ):
if s == d:
return []
lowercase = []
lowercase = []
if s == -2:
lowercase = list(self.graph )[0]
stack.append(lowercase__ )
visited.append(lowercase__ )
lowercase = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowercase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase__ ) != 0:
lowercase = stack[len(lowercase__ ) - 1]
else:
lowercase = ss
            # check if we have reached the starting point
if len(lowercase__ ) == 0:
return visited
def __UpperCAmelCase ( self :Optional[int] , lowercase__ :List[Any]=-1 ):
if c == -1:
lowercase = floor(random() * 1_0000 ) + 10
for i in range(lowercase__ ):
            # every vertex gets a random number of outgoing edges (up to ~100)
for _ in range(floor(random() * 102 ) + 1 ):
lowercase = floor(random() * c ) + 1
if n != i:
self.add_pair(lowercase__ , lowercase__ , 1 )
def __UpperCAmelCase ( self :int , lowercase__ :List[str]=-2 ):
lowercase = deque()
lowercase = []
if s == -2:
lowercase = list(self.graph )[0]
d.append(lowercase__ )
visited.append(lowercase__ )
while d:
lowercase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __UpperCAmelCase ( self :int , lowercase__ :int ):
lowercase = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def __UpperCAmelCase ( self :List[Any] , lowercase__ :Optional[int] ):
return len(self.graph[u] )
def __UpperCAmelCase ( self :str , lowercase__ :Optional[Any]=-2 ):
lowercase = []
lowercase = []
if s == -2:
lowercase = list(self.graph )[0]
stack.append(lowercase__ )
visited.append(lowercase__ )
lowercase = s
lowercase = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowercase__ ) != 0:
lowercase = stack[len(lowercase__ ) - 1]
else:
lowercase = ss
            # check if we have reached the starting point
if len(lowercase__ ) == 0:
return sorted_nodes
def __UpperCAmelCase ( self :int ):
lowercase = []
lowercase = []
lowercase = list(self.graph )[0]
stack.append(lowercase__ )
visited.append(lowercase__ )
lowercase = -2
lowercase = []
lowercase = s
lowercase = False
lowercase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase = len(lowercase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase = True
if len(lowercase__ ) != 0:
lowercase = stack[len(lowercase__ ) - 1]
else:
lowercase = False
indirect_parents.append(lowercase__ )
lowercase = s
lowercase = ss
            # check if we have reached the starting point
if len(lowercase__ ) == 0:
return list(lowercase__ )
def __UpperCAmelCase ( self :List[str] ):
lowercase = []
lowercase = []
lowercase = list(self.graph )[0]
stack.append(lowercase__ )
visited.append(lowercase__ )
lowercase = -2
lowercase = []
lowercase = s
lowercase = False
lowercase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase = len(lowercase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase = True
if len(lowercase__ ) != 0:
lowercase = stack[len(lowercase__ ) - 1]
else:
lowercase = False
indirect_parents.append(lowercase__ )
lowercase = s
lowercase = ss
            # check if we have reached the starting point
if len(lowercase__ ) == 0:
return False
def __UpperCAmelCase ( self :int , lowercase__ :Tuple=-2 , lowercase__ :Any=-1 ):
lowercase = time()
self.dfs(lowercase__ , lowercase__ )
lowercase = time()
return end - begin
def __UpperCAmelCase ( self :str , lowercase__ :List[Any]=-2 ):
lowercase = time()
self.bfs(lowercase__ )
lowercase = time()
return end - begin
class a__ :
"""simple docstring"""
def __init__( self :Optional[int] ):
lowercase = {}
def __UpperCAmelCase ( self :Union[str, Any] , lowercase__ :Tuple , lowercase__ :Tuple , lowercase__ :str=1 ):
        # check if u exists
if self.graph.get(lowercase__ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowercase = [[w, v]]
# add the other way
if self.graph.get(lowercase__ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
lowercase = [[w, u]]
def __UpperCAmelCase ( self :Tuple , lowercase__ :Any , lowercase__ :List[Any] ):
if self.graph.get(lowercase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase__ )
# the other way round
if self.graph.get(lowercase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowercase__ )
def __UpperCAmelCase ( self :List[str] , lowercase__ :Union[str, Any]=-2 , lowercase__ :List[str]=-1 ):
if s == d:
return []
lowercase = []
lowercase = []
if s == -2:
lowercase = list(self.graph )[0]
stack.append(lowercase__ )
visited.append(lowercase__ )
lowercase = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowercase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase__ ) != 0:
lowercase = stack[len(lowercase__ ) - 1]
else:
lowercase = ss
            # check if we have reached the starting point
if len(lowercase__ ) == 0:
return visited
def __UpperCAmelCase ( self :Optional[Any] , lowercase__ :Union[str, Any]=-1 ):
if c == -1:
lowercase = floor(random() * 1_0000 ) + 10
for i in range(lowercase__ ):
            # every vertex gets a random number of outgoing edges (up to ~100)
for _ in range(floor(random() * 102 ) + 1 ):
lowercase = floor(random() * c ) + 1
if n != i:
self.add_pair(lowercase__ , lowercase__ , 1 )
def __UpperCAmelCase ( self :Optional[Any] , lowercase__ :str=-2 ):
lowercase = deque()
lowercase = []
if s == -2:
lowercase = list(self.graph )[0]
d.append(lowercase__ )
visited.append(lowercase__ )
while d:
lowercase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __UpperCAmelCase ( self :List[str] , lowercase__ :Optional[int] ):
return len(self.graph[u] )
def __UpperCAmelCase ( self :List[Any] ):
lowercase = []
lowercase = []
lowercase = list(self.graph )[0]
stack.append(lowercase__ )
visited.append(lowercase__ )
lowercase = -2
lowercase = []
lowercase = s
lowercase = False
lowercase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase = len(lowercase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase = True
if len(lowercase__ ) != 0:
lowercase = stack[len(lowercase__ ) - 1]
else:
lowercase = False
indirect_parents.append(lowercase__ )
lowercase = s
lowercase = ss
            # check if we have reached the starting point
if len(lowercase__ ) == 0:
return list(lowercase__ )
def __UpperCAmelCase ( self :Any ):
lowercase = []
lowercase = []
lowercase = list(self.graph )[0]
stack.append(lowercase__ )
visited.append(lowercase__ )
lowercase = -2
lowercase = []
lowercase = s
lowercase = False
lowercase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase = len(lowercase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase = True
if len(lowercase__ ) != 0:
lowercase = stack[len(lowercase__ ) - 1]
else:
lowercase = False
indirect_parents.append(lowercase__ )
lowercase = s
lowercase = ss
            # check if we have reached the starting point
if len(lowercase__ ) == 0:
return False
def __UpperCAmelCase ( self :str ):
return list(self.graph )
def __UpperCAmelCase ( self :List[str] , lowercase__ :Tuple=-2 , lowercase__ :Union[str, Any]=-1 ):
lowercase = time()
self.dfs(lowercase__ , lowercase__ )
lowercase = time()
return end - begin
def __UpperCAmelCase ( self :Any , lowercase__ :Any=-2 ):
lowercase = time()
self.bfs(lowercase__ )
lowercase = time()
return end - begin
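# Self-contained sketch (added for illustration) of the adjacency-list convention
# both classes above rely on: ``graph`` maps u -> a list of [weight, v] pairs. The
# helper name and the sample graph are assumptions, not part of the original file.
def _bfs_sketch(graph, start):
    visited, queue = [start], deque([start])
    while queue:
        u = queue.popleft()
        for _weight, v in graph.get(u, []):
            if v not in visited:
                visited.append(v)
                queue.append(v)
    return visited

# _bfs_sketch({"a": [[1, "b"]], "b": [[1, "c"]], "c": []}, "a") -> ["a", "b", "c"]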
| 314 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A__ ( __snake_case ):
'''simple docstring'''
snake_case__ = 42
snake_case__ = 42
class A__ ( __snake_case , __snake_case ):
'''simple docstring'''
snake_case__ = 1
@register_to_config
def __init__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : int = 2000 , _SCREAMING_SNAKE_CASE : float = 0.1_5 , _SCREAMING_SNAKE_CASE : float = 0.0_1 , _SCREAMING_SNAKE_CASE : float = 1_3_4_8.0 , _SCREAMING_SNAKE_CASE : float = 1E-5 , _SCREAMING_SNAKE_CASE : int = 1 , ):
"""simple docstring"""
UpperCamelCase = sigma_max
# setable values
UpperCamelCase = None
self.set_sigmas(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _SCREAMING_SNAKE_CASE : torch.FloatTensor , _SCREAMING_SNAKE_CASE : Optional[int] = None ):
"""simple docstring"""
return sample
def _SCREAMING_SNAKE_CASE ( self : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float = None , _SCREAMING_SNAKE_CASE : Union[str, torch.device] = None ):
"""simple docstring"""
UpperCamelCase = sampling_eps if sampling_eps is not None else self.config.sampling_eps
UpperCamelCase = torch.linspace(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float = None , _SCREAMING_SNAKE_CASE : float = None , _SCREAMING_SNAKE_CASE : float = None ):
"""simple docstring"""
UpperCamelCase = sigma_min if sigma_min is not None else self.config.sigma_min
UpperCamelCase = sigma_max if sigma_max is not None else self.config.sigma_max
UpperCamelCase = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
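        # geometric noise schedule: sigma(t) = sigma_min * (sigma_max / sigma_min) ** t,
        # evaluated over the timesteps set above (t close to 1 gives sigma near sigma_max)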
UpperCamelCase = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
UpperCamelCase = torch.exp(torch.linspace(math.log(_SCREAMING_SNAKE_CASE ) , math.log(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _SCREAMING_SNAKE_CASE ( self : Any , _SCREAMING_SNAKE_CASE : torch.FloatTensor , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : torch.FloatTensor , _SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , _SCREAMING_SNAKE_CASE : bool = True , ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
UpperCamelCase = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
UpperCamelCase = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be on the same device, so we use cpu as is the default with cuda
UpperCamelCase = timesteps.to(self.discrete_sigmas.device )
UpperCamelCase = self.discrete_sigmas[timesteps].to(sample.device )
UpperCamelCase = self.get_adjacent_sigma(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(sample.device )
UpperCamelCase = torch.zeros_like(_SCREAMING_SNAKE_CASE )
UpperCamelCase = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
UpperCamelCase = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
UpperCamelCase = diffusion.unsqueeze(-1 )
UpperCamelCase = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
UpperCamelCase = randn_tensor(
sample.shape , layout=sample.layout , generator=_SCREAMING_SNAKE_CASE , device=sample.device , dtype=sample.dtype )
UpperCamelCase = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
UpperCamelCase = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=_SCREAMING_SNAKE_CASE , prev_sample_mean=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Any , _SCREAMING_SNAKE_CASE : torch.FloatTensor , _SCREAMING_SNAKE_CASE : torch.FloatTensor , _SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , _SCREAMING_SNAKE_CASE : bool = True , ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # For small batch sizes, the paper "suggests replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
UpperCamelCase = randn_tensor(sample.shape , layout=sample.layout , generator=_SCREAMING_SNAKE_CASE ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
UpperCamelCase = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
UpperCamelCase = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
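        # Langevin-corrector step size: 2 * (snr * ||noise|| / ||score||) ** 2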
UpperCamelCase = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
UpperCamelCase = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
UpperCamelCase = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
UpperCamelCase = step_size.unsqueeze(-1 )
UpperCamelCase = sample + step_size * model_output
UpperCamelCase = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : str , _SCREAMING_SNAKE_CASE : torch.FloatTensor , _SCREAMING_SNAKE_CASE : torch.FloatTensor , _SCREAMING_SNAKE_CASE : torch.FloatTensor , ):
"""simple docstring"""
UpperCamelCase = timesteps.to(original_samples.device )
UpperCamelCase = self.discrete_sigmas.to(original_samples.device )[timesteps]
UpperCamelCase = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(_SCREAMING_SNAKE_CASE ) * sigmas[:, None, None, None]
)
UpperCamelCase = noise + original_samples
return noisy_samples
def __len__( self : Dict ):
"""simple docstring"""
return self.config.num_train_timesteps
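if __name__ == "__main__":
    # Illustrative sketch (added): the variance-exploding sigma schedule the class
    # above builds. The concrete numbers here are assumptions, not from this file.
    _sigma_min, _sigma_max = 0.0_1, 1_3_4_8.0
    _timesteps = torch.linspace(1 , 1E-5 , 5 )
    _sigmas = _sigma_min * (_sigma_max / _sigma_min) ** _timesteps
    print(_sigmas )  # starts at sigma_max and decays geometrically toward sigma_min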
| 280 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__magic_name__ : Any = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 12_8022
FR_CODE = 12_8028
@require_sentencepiece
class A__ ( __snake_case , unittest.TestCase ):
'''simple docstring'''
snake_case__ = MaMaaaTokenizer
snake_case__ = False
snake_case__ = False
snake_case__ = True
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
super().setUp()
UpperCamelCase = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
UpperCamelCase = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCamelCase = Path(self.tmpdirname )
save_json(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['spm_file'] )
UpperCamelCase = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : str , **_SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = '</s>'
UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<s>' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('Skip this test while all models are still to be uploaded.' )
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [2, 3, 4, 5, 6] , )
UpperCamelCase = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['▁This', '▁is', '▁a', '▁t', 'est'] )
UpperCamelCase = tokenizer.convert_tokens_to_string(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , 'This is a test' )
@slow
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
UpperCamelCase = {'input_ids': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='facebook/m2m100_418M' , revision='c168bae485c864188cf9aa0e4108b0b6934dc91e' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
'''simple docstring'''
snake_case__ = """facebook/m2m100_418M"""
snake_case__ = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
snake_case__ = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
snake_case__ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ):
"""simple docstring"""
UpperCamelCase = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en' , tgt_lang='fr' )
UpperCamelCase = 1
return cls
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('ar' ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id('en' ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id('ro' ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id('mr' ) , 12_8063 )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.tokenizer.get_vocab()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['<unk>'] , 3 )
self.assertIn(self.tokenizer.get_lang_token('en' ) , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = 'en'
UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertIn(_SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
# fmt: off
UpperCamelCase = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
# fmt: on
UpperCamelCase = self.tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = MaMaaaTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.lang_token_to_id , _SCREAMING_SNAKE_CASE )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = 'en'
UpperCamelCase = 'fr'
UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
UpperCamelCase = shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
UpperCamelCase = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
UpperCamelCase = 'mr'
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
UpperCamelCase = 'zh'
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = 'mr'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
UpperCamelCase = 'zh'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.tokenizer._build_translation_inputs('A test' , return_tensors='pt' , src_lang='en' , tgt_lang='ar' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , {
# en_XX, A, test, EOS
'input_ids': [[12_8022, 58, 4183, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 12_8006,
} , )
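if __name__ == "__main__":
    # Illustrative sketch (added) of the right-shift the integration test checks:
    # decoder inputs are the labels shifted one step right, with the decoder start
    # token (EOS, id 2, for this model) prepended. The label ids are hypothetical.
    import torch
    _labels = torch.tensor([[FR_CODE, 5364, 82, 2]] )
    _shifted = _labels.new_zeros(_labels.shape )
    _shifted[:, 1:] = _labels[:, :-1]
    _shifted[:, 0] = 2  # eos_token_id acts as decoder_start_token_id
    print(_shifted.tolist() )  # [[2, 128028, 5364, 82]]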
| 280 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
A__ : List[Any] = '''markuplm'''
def __init__( self : Optional[int] , _snake_case : Optional[int]=3_0522 , _snake_case : List[str]=768 , _snake_case : Optional[int]=12 , _snake_case : Tuple=12 , _snake_case : str=3072 , _snake_case : Union[str, Any]="gelu" , _snake_case : Any=0.1 , _snake_case : Optional[Any]=0.1 , _snake_case : Optional[Any]=512 , _snake_case : str=2 , _snake_case : List[str]=0.02 , _snake_case : str=1E-1_2 , _snake_case : Optional[Any]=0 , _snake_case : Tuple=0 , _snake_case : List[str]=2 , _snake_case : Tuple=256 , _snake_case : Optional[int]=1024 , _snake_case : Optional[Any]=216 , _snake_case : str=1001 , _snake_case : int=32 , _snake_case : int=50 , _snake_case : Optional[Any]="absolute" , _snake_case : List[Any]=True , _snake_case : Dict=None , **_snake_case : List[Any] , ):
super().__init__(
pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case , )
__lowercase : Any = vocab_size
__lowercase : List[str] = hidden_size
__lowercase : Union[str, Any] = num_hidden_layers
__lowercase : List[str] = num_attention_heads
__lowercase : Optional[Any] = hidden_act
__lowercase : Dict = intermediate_size
__lowercase : int = hidden_dropout_prob
__lowercase : Dict = attention_probs_dropout_prob
__lowercase : str = max_position_embeddings
__lowercase : List[Any] = type_vocab_size
__lowercase : int = initializer_range
__lowercase : List[Any] = layer_norm_eps
__lowercase : List[Any] = position_embedding_type
__lowercase : Optional[Any] = use_cache
__lowercase : Optional[Any] = classifier_dropout
# additional properties
__lowercase : int = max_depth
__lowercase : Any = max_xpath_tag_unit_embeddings
__lowercase : str = max_xpath_subs_unit_embeddings
__lowercase : List[str] = tag_pad_id
__lowercase : Union[str, Any] = subs_pad_id
__lowercase : List[Any] = xpath_unit_hidden_size
| 284 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def snake_case_ ( self : Optional[Any] ):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('''gelu''' )
        # the python and torch gelu implementations agree, but differ from gelu_new
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
    def snake_case_ ( self : Dict ):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('''gelu''' )
        geluaa = get_activation('''gelu_10''' )
        y_gelu = torch_builtin(x )
        y_gelu_aa = geluaa(x )
        # gelu_10 clips activations at 10; below the clip it matches plain gelu
        clipped_mask = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_aa ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def snake_case_ ( self : Any ):
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(_snake_case ):
get_activation('''bogus''' )
with self.assertRaises(_snake_case ):
get_activation(_snake_case )
    def snake_case_ ( self : Dict ):
        acta = get_activation('''gelu''' )
        acta.a = 1
        acta_new = get_activation('''gelu''' )
        # setting an attribute on one instance must not affect a fresh instance
        self.assertEqual(acta.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = acta_new.a
| 284 | 1 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = ['a', 'b', 'c']
# Defaults to last layer if both are None
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(a , a , a)
self.assertEqual(a , ['c'])
self.assertEqual(a , [2])
# Out indices set to match out features
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(['a', 'c'] , a , a)
self.assertEqual(a , ['a', 'c'])
self.assertEqual(a , [0, 2])
# Out features set to match out indices
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(a , [0, 2] , a)
self.assertEqual(a , ['a', 'c'])
self.assertEqual(a , [0, 2])
# Out features selected from negative indices
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(a , [-3, -1] , a)
self.assertEqual(a , ['a', 'c'])
self.assertEqual(a , [-3, -1])
def SCREAMING_SNAKE_CASE__ ( self) -> int:
# Stage names must be set
with self.assertRaises(a):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , a)
# Out features must be a list
with self.assertRaises(a):
verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'])
# Out features must be a subset of stage names
with self.assertRaises(a):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'])
# Out indices must be a list or tuple
with self.assertRaises(a):
verify_out_features_out_indices(a , 0 , ['a', 'b'])
# Out indices must be a subset of stage names
with self.assertRaises(a):
verify_out_features_out_indices(a , (0, 1) , ['a'])
# Out features and out indices must be the same length
with self.assertRaises(a):
verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'])
# Out features should match out indices
with self.assertRaises(a):
verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'])
# Out features and out indices should be in order
with self.assertRaises(a):
verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'])
# Check passes with valid inputs
verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'])
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = BackboneMixin()
SCREAMING_SNAKE_CASE = ['a', 'b', 'c']
SCREAMING_SNAKE_CASE = ['a', 'c']
SCREAMING_SNAKE_CASE = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['a', 'c'])
self.assertEqual(backbone.out_indices , [0, 2])
# Check out features and indices are updated correctly
SCREAMING_SNAKE_CASE = ['a', 'b']
self.assertEqual(backbone.out_features , ['a', 'b'])
self.assertEqual(backbone.out_indices , [0, 1])
SCREAMING_SNAKE_CASE = [-3, -1]
self.assertEqual(backbone.out_features , ['a', 'c'])
self.assertEqual(backbone.out_indices , [-3, -1])
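# A compact sketch (added) of the alignment rule the first test exercises; the
# function name is hypothetical and the real helper lives in
# transformers.utils.backbone_utils.
def _align_sketch(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        out_indices = [len(stage_names) - 1]  # default to the last stage
    if out_indices is None:
        out_indices = [stage_names.index(f) for f in out_features]
    if out_features is None:
        out_features = [stage_names[i] for i in out_indices]
    return out_features, out_indices

# _align_sketch(None, None, ["a", "b", "c"])       -> (["c"], [2])
# _align_sketch(["a", "c"], None, ["a", "b", "c"]) -> (["a", "c"], [0, 2])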
| 73 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__snake_case = get_tests_dir('''fixtures''')
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int ):
'''simple docstring'''
__UpperCamelCase = mock.Mock()
__UpperCamelCase = 500
__UpperCamelCase = {}
__UpperCamelCase = HTTPError
__UpperCamelCase = {}
# Download this model to make sure it's in the cache.
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request',return_value=A_ ) as mock_head:
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class __lowerCamelCase (unittest.TestCase ):
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
__UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
try:
delete_repo(token=cls._token,repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-feature-extractor',use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='test-feature-extractor',push_to_hub=A_,use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('valid_org/test-feature-extractor',use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
# Reset repo
delete_repo(token=self._token,repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A_,repo_id='valid_org/test-feature-extractor-org',push_to_hub=A_,use_auth_token=self._token )
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A_,getattr(A_,A_ ) )
def snake_case_ ( self: int ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
__UpperCamelCase = CustomFeatureExtractor.from_pretrained(A_ )
feature_extractor.push_to_hub('test-dynamic-feature-extractor',use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map,{'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'},)
__UpperCamelCase = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''',trust_remote_code=A_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__,'CustomFeatureExtractor' )
| 1 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """dandelin/vilt-b32-finetuned-vqa"""
__SCREAMING_SNAKE_CASE = (
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
__SCREAMING_SNAKE_CASE = """image_qa"""
__SCREAMING_SNAKE_CASE = AutoProcessor
__SCREAMING_SNAKE_CASE = AutoModelForVisualQuestionAnswering
__SCREAMING_SNAKE_CASE = ["""image""", """text"""]
__SCREAMING_SNAKE_CASE = ["""text"""]
def __init__( self : str , *a_ : Dict , **a_ : Dict ):
"""simple docstring"""
requires_backends(self , ["vision"] )
super().__init__(*a_ , **a_ )
def A ( self : Tuple , a_ : "Image" , a_ : str ):
"""simple docstring"""
return self.pre_processor(a_ , a_ , return_tensors="pt" )
def A ( self : Dict , a_ : Union[str, Any] ):
"""simple docstring"""
with torch.no_grad():
return self.model(**a_ ).logits
def A ( self : List[Any] , a_ : List[str] ):
"""simple docstring"""
__snake_case = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
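# Usage sketch (terminology from the base ``PipelineTool``, which this class
# extends): the three methods above are its encode / forward / decode hooks --
# build processor inputs from (image, question), run the VQA model under
# ``torch.no_grad``, then argmax the logits into an answer via the model's
# id2label-style mapping.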
| 719 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : List[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """data2vec-text"""
def __init__( self : List[str] , a_ : str=30_522 , a_ : Optional[int]=768 , a_ : Dict=12 , a_ : int=12 , a_ : Dict=3_072 , a_ : Dict="gelu" , a_ : Optional[Any]=0.1 , a_ : List[str]=0.1 , a_ : int=512 , a_ : Any=2 , a_ : int=0.02 , a_ : Dict=1e-12 , a_ : Dict=1 , a_ : Any=0 , a_ : Dict=2 , a_ : Optional[int]="absolute" , a_ : List[Any]=True , a_ : Dict=None , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
@property
def A ( self : Any ):
"""simple docstring"""
if self.task == "multiple-choice":
__snake_case = {0: "batch", 1: "choice", 2: "sequence"}
else:
__snake_case = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
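# Note (added for clarity): the ONNX ``inputs`` property above only declares which
# axes are dynamic -- batch and sequence always vary, and the multiple-choice task
# inserts an extra per-example ``choice`` axis between them.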
| 680 | 0 |
"""simple docstring"""
from __future__ import annotations
__SCREAMING_SNAKE_CASE =list[list[int]]
# assigning initial values to the grid
__SCREAMING_SNAKE_CASE =[
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__SCREAMING_SNAKE_CASE =[
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowercase__( __SCREAMING_SNAKE_CASE : Matrix , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
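    # Reject ``n`` if it already appears in the target row, the target column, or
    # the 3x3 sub-grid containing (row, column).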
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def lowercase__( __SCREAMING_SNAKE_CASE : Matrix ):
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def lowercase__( __SCREAMING_SNAKE_CASE : Matrix ):
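    # Classic backtracking: fill the first empty cell with each digit 1-9 that
    # passes the safety check, recurse, and reset the cell to 0 when a branch
    # dead-ends; a ``None`` return means the grid has no solution.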
if location := find_empty_location(__SCREAMING_SNAKE_CASE ):
lowercase_ , lowercase_ : List[str] = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = digit
if sudoku(__SCREAMING_SNAKE_CASE ) is not None:
return grid
lowercase_ : Tuple = 0
return None
def lowercase__( __SCREAMING_SNAKE_CASE : Matrix ):
for row in grid:
for cell in row:
print(__SCREAMING_SNAKE_CASE , end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
__SCREAMING_SNAKE_CASE =sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 425 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowercase__( __SCREAMING_SNAKE_CASE : Any ):
lowercase_ : Union[str, Any] = 3_84
lowercase_ : Union[str, Any] = 7
if "tiny" in model_name:
lowercase_ : int = 96
lowercase_ : List[Any] = (2, 2, 6, 2)
lowercase_ : Union[str, Any] = (3, 6, 12, 24)
elif "small" in model_name:
lowercase_ : Optional[int] = 96
lowercase_ : List[Any] = (2, 2, 18, 2)
lowercase_ : List[Any] = (3, 6, 12, 24)
elif "base" in model_name:
lowercase_ : Any = 1_28
lowercase_ : Tuple = (2, 2, 18, 2)
lowercase_ : Optional[int] = (4, 8, 16, 32)
lowercase_ : Union[str, Any] = 12
lowercase_ : Optional[int] = 5_12
elif "large" in model_name:
lowercase_ : Union[str, Any] = 1_92
lowercase_ : Any = (2, 2, 18, 2)
lowercase_ : int = (6, 12, 24, 48)
lowercase_ : Optional[Any] = 12
lowercase_ : Union[str, Any] = 7_68
# set label information
lowercase_ : Union[str, Any] = 1_50
lowercase_ : Union[str, Any] = 'huggingface/label-files'
lowercase_ : Any = 'ade20k-id2label.json'
lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase_ : Any = {v: k for k, v in idalabel.items()}
lowercase_ : Any = SwinConfig(
embed_dim=__SCREAMING_SNAKE_CASE , depths=__SCREAMING_SNAKE_CASE , num_heads=__SCREAMING_SNAKE_CASE , window_size=__SCREAMING_SNAKE_CASE , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
lowercase_ : int = UperNetConfig(
backbone_config=__SCREAMING_SNAKE_CASE , auxiliary_in_channels=__SCREAMING_SNAKE_CASE , num_labels=__SCREAMING_SNAKE_CASE , idalabel=__SCREAMING_SNAKE_CASE , labelaid=__SCREAMING_SNAKE_CASE , )
return config
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Tuple = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.stages.{i}.downsample.reduction.weight''', F'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.weight''', F'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.bias''', F'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase_ : str = dct.pop(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = val
def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] ):
lowercase_ : List[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowercase_ : int = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowercase_ : Dict = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
lowercase_ : List[Any] = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase_ : Union[str, Any] = in_proj_weight[:dim, :]
lowercase_ : List[str] = in_proj_bias[: dim]
lowercase_ : int = in_proj_weight[
dim : dim * 2, :
]
lowercase_ : List[Any] = in_proj_bias[
dim : dim * 2
]
lowercase_ : Optional[Any] = in_proj_weight[
-dim :, :
]
lowercase_ : Optional[Any] = in_proj_bias[-dim :]
# fmt: on
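# Shape note (added): the fused qkv weight popped above has shape (3 * dim, dim);
# the row slices taken are query = [:dim], key = [dim:2*dim], value = [-dim:],
# matching the (q, k, v) concatenation order of the original checkpoint.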
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ , lowercase_ : List[Any] = x.shape
lowercase_ : str = x.reshape(__SCREAMING_SNAKE_CASE , 4 , in_channel // 4 )
lowercase_ : Tuple = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return x
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase_ , lowercase_ : List[str] = x.shape
lowercase_ : List[str] = x.reshape(__SCREAMING_SNAKE_CASE , in_channel // 4 , 4 )
lowercase_ : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return x
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : List[str] = x.shape[0]
lowercase_ : List[str] = x.reshape(4 , in_channel // 4 )
lowercase_ : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(__SCREAMING_SNAKE_CASE )
return x
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Tuple = x.shape[0]
lowercase_ : List[str] = x.reshape(in_channel // 4 , 4 )
lowercase_ : Tuple = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(__SCREAMING_SNAKE_CASE )
return x
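# Note (best-effort reading, added): the four helpers above shuffle groups of four
# channels between the [0, 2, 1, 3] order used by the original Swin patch-merging
# ("unfold") reduction/norm weights and the order expected here; the matrix and
# vector variants are applied to weights and norms respectively, and each pair is
# the other's inverse permutation.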
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] ):
lowercase_ : int = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
lowercase_ : List[Any] = model_name_to_url[model_name]
lowercase_ : Optional[Any] = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' , file_name=__SCREAMING_SNAKE_CASE )[
'state_dict'
]
for name, param in state_dict.items():
print(__SCREAMING_SNAKE_CASE , param.shape )
lowercase_ : int = get_upernet_config(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = UperNetForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
lowercase_ : Any = state_dict.pop(__SCREAMING_SNAKE_CASE )
if "bn" in key:
lowercase_ : List[Any] = key.replace('bn' , 'batch_norm' )
lowercase_ : Optional[Any] = val
# rename keys
lowercase_ : Tuple = create_rename_keys(__SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
read_in_q_k_v(__SCREAMING_SNAKE_CASE , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
lowercase_ : List[str] = reverse_correct_unfold_reduction_order(__SCREAMING_SNAKE_CASE )
if "norm" in key:
lowercase_ : str = reverse_correct_unfold_norm_order(__SCREAMING_SNAKE_CASE )
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# verify on image
lowercase_ : Optional[int] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
lowercase_ : Union[str, Any] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ).convert('RGB' )
lowercase_ : Any = SegformerImageProcessor()
lowercase_ : str = processor(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
with torch.no_grad():
lowercase_ : Any = model(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = outputs.logits
print(logits.shape )
print('First values of logits:' , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
lowercase_ : Any = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
lowercase_ : Tuple = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
lowercase_ : Tuple = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
lowercase_ : Tuple = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[F"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
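    # Example invocation (illustrative; the script filename is hypothetical, the
    # flags are the ones defined above):
    #   python convert_upernet_to_pytorch.py --model_name upernet-swin-tiny \
    #       --pytorch_dump_folder_path ./upernet-swin-tiny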
| 425 | 1 |
"""simple docstring"""
from collections import namedtuple
lowerCAmelCase_ = namedtuple('from_to', 'from_ to')
lowerCAmelCase_ = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.0_0_1, 1_000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
'cubicyard': from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
'cubicfoot': from_to(0.0_2_8, 3_5.3_1_4_7),
'cup': from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ ''', '''.join(__lowerCamelCase ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ ''', '''.join(__lowerCamelCase ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
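# Worked example (added): converting 1 "cubicmeter" to "gallon" evaluates
# 1 * 1 * 264.172 = 264.172, i.e. one cubic metre is roughly 264.172 US gallons;
# the reverse direction uses the 0.00454 factor (1 gallon ~= 0.00454 m^3).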
if __name__ == "__main__":
import doctest
doctest.testmod()
| 122 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
lowercase__ : Any = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
lowercase__ : Optional[Any] = DetaConfig(
backbone_config=__lowerCamelCase , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=__lowerCamelCase , with_box_refine=__lowerCamelCase , two_stage=__lowerCamelCase , )
# set labels
lowercase__ : Any = '''huggingface/label-files'''
if "o365" in model_name:
lowercase__ : Optional[Any] = 3_66
lowercase__ : List[Any] = '''object365-id2label.json'''
else:
lowercase__ : Union[str, Any] = 91
lowercase__ : Optional[int] = '''coco-detection-id2label.json'''
lowercase__ : List[Any] = num_labels
lowercase__ : str = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) ) , '''r''' ) )
lowercase__ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Any = idalabel
lowercase__ : Any = {v: k for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
lowercase__ : Tuple = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Dict = dct.pop(__lowerCamelCase )
lowercase__ : Optional[int] = val
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[str]:
lowercase__ : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowercase__ : int = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowercase__ : Optional[int] = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" )
lowercase__ : Optional[Any] = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase__ : List[Any] = in_proj_weight[:dim, :]
lowercase__ : List[str] = in_proj_bias[: dim]
lowercase__ : Dict = in_proj_weight[
dim : dim * 2, :
]
lowercase__ : str = in_proj_bias[
dim : dim * 2
]
lowercase__ : str = in_proj_weight[
-dim :, :
]
lowercase__ : List[str] = in_proj_bias[-dim :]
# fmt: on
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
# transformer decoder self-attention layers
lowercase__ : List[str] = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
lowercase__ : Any = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
lowercase__ : Dict = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Union[str, Any] = in_proj_weight[:hidden_size, :]
lowercase__ : str = in_proj_bias[:hidden_size]
lowercase__ : int = in_proj_weight[
hidden_size : hidden_size * 2, :
]
lowercase__ : List[Any] = in_proj_bias[hidden_size : hidden_size * 2]
lowercase__ : Union[str, Any] = in_proj_weight[-hidden_size:, :]
lowercase__ : Union[str, Any] = in_proj_bias[-hidden_size:]
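# Note (added): as with the Swin blocks, the fused ``in_proj`` matrix is split
# row-wise into query / key / value chunks of ``hidden_size`` rows each, in that
# order.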
def __UpperCAmelCase ( ) -> List[Any]:
lowercase__ : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ : Optional[Any] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
lowercase__ : Dict = get_deta_config(__lowerCamelCase )
# load original state dict
if model_name == "deta-swin-large":
lowercase__ : List[str] = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
elif model_name == "deta-swin-large-o365":
lowercase__ : Dict = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
lowercase__ : Optional[Any] = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model''']
# original state dict
for name, param in state_dict.items():
print(__lowerCamelCase , param.shape )
# rename keys
lowercase__ : List[Any] = create_rename_keys(__lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
read_in_swin_q_k_v(__lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(__lowerCamelCase , __lowerCamelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
lowercase__ : str = state_dict.pop(__lowerCamelCase )
lowercase__ : Dict = val
if "input_proj" in key:
lowercase__ : Any = state_dict.pop(__lowerCamelCase )
lowercase__ : int = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
lowercase__ : Tuple = state_dict.pop(__lowerCamelCase )
lowercase__ : int = val
# finally, create HuggingFace model and load state dict
lowercase__ : Any = DetaForObjectDetection(__lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
model.eval()
lowercase__ : List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
model.to(__lowerCamelCase )
# load image processor
lowercase__ : Any = DetaImageProcessor(format='''coco_detection''' )
# verify our conversion on image
lowercase__ : Dict = prepare_img()
lowercase__ : Any = processor(images=__lowerCamelCase , return_tensors='''pt''' )
lowercase__ : str = encoding['''pixel_values''']
lowercase__ : Tuple = model(pixel_values.to(__lowerCamelCase ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
lowercase__ : Any = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
lowercase__ : Any = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
lowercase__ : Dict = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
lowercase__ : int = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__lowerCamelCase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__lowerCamelCase ) , atol=1E-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(f"""jozhang97/{model_name}""" )
processor.push_to_hub(f"""jozhang97/{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCAmelCase_ = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
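    # Example invocation (illustrative; the script filename is hypothetical, the
    # flags are the ones defined above):
    #   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
    #       --pytorch_dump_folder_path ./deta-swin-large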
| 122 | 1 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
lowercase__ :Union[str, Any] = re.compile(R'\s+')
def lowerCamelCase_ ( UpperCAmelCase_ ) ->Any:
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_UpperCamelCase , '''''' , example['''content'''] ).encode('''utf-8''' ) ).hexdigest()}
def lowerCamelCase_ ( UpperCAmelCase_ ) ->Dict:
"""simple docstring"""
__UpperCAmelCase : Tuple = [len(_UpperCamelCase ) for line in example['''content'''].splitlines()]
return {"line_mean": np.mean(_UpperCamelCase ), "line_max": max(_UpperCamelCase )}
def lowerCamelCase_ ( UpperCAmelCase_ ) ->str:
"""simple docstring"""
__UpperCAmelCase : List[Any] = np.mean([c.isalnum() for c in example['''content''']] )
return {"alpha_frac": alpha_frac}
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->Optional[int]:
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example['''hash'''] )
return True
else:
return False
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_=5 ) ->Any:
"""simple docstring"""
__UpperCAmelCase : Dict = ['''auto-generated''', '''autogenerated''', '''automatically generated''']
__UpperCAmelCase : List[Any] = example['''content'''].splitlines()
for _, line in zip(range(_UpperCamelCase ) , _UpperCamelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_=5 , UpperCAmelCase_=0.05 ) ->str:
"""simple docstring"""
__UpperCAmelCase : List[str] = ['''unit tests''', '''test file''', '''configuration file''']
__UpperCAmelCase : Optional[int] = example['''content'''].splitlines()
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : Union[str, Any] = 0
# first test
for _, line in zip(range(_UpperCamelCase ) , _UpperCamelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
__UpperCAmelCase : Optional[int] = example['''content'''].count('''\n''' )
__UpperCAmelCase : Union[str, Any] = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('''config''' )
count_test += line.lower().count('''test''' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def lowerCamelCase_ ( UpperCAmelCase_ ) ->Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = ['''def ''', '''class ''', '''for ''', '''while ''']
__UpperCAmelCase : str = example['''content'''].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_=4 ) ->Optional[int]:
"""simple docstring"""
__UpperCAmelCase : Tuple = example['''content'''].splitlines()
__UpperCAmelCase : Any = 0
for line in lines:
counter += line.lower().count('''=''' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def lowerCamelCase_ ( UpperCAmelCase_ ) ->Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = tokenizer(example['''content'''] , truncation=_UpperCamelCase )['''input_ids''']
__UpperCAmelCase : str = len(example['''content'''] ) / len(_UpperCamelCase )
return {"ratio": ratio}
def lowerCamelCase_ ( UpperCAmelCase_ ) ->Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = {}
results.update(get_hash(_UpperCamelCase ) )
results.update(line_stats(_UpperCamelCase ) )
results.update(alpha_stats(_UpperCamelCase ) )
results.update(char_token_ratio(_UpperCamelCase ) )
results.update(is_autogenerated(_UpperCamelCase ) )
results.update(is_config_or_test(_UpperCamelCase ) )
results.update(has_no_keywords(_UpperCamelCase ) )
results.update(has_few_assignments(_UpperCamelCase ) )
return results
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->int:
"""simple docstring"""
if not check_uniques(_UpperCamelCase , _UpperCamelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
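# Note (added): an example survives the predicate above only if its hash is still
# unique and every heuristic passes -- not autogenerated, line lengths within
# bounds, enough alphanumeric characters, a high enough char/token ratio, the
# probabilistic config/test and no-keyword checks, and enough assignment
# statements.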
def lowerCamelCase_ ( UpperCAmelCase_ ) ->Optional[int]:
"""simple docstring"""
with open(_UpperCamelCase , '''rb''' ) as f_in:
with gzip.open(str(_UpperCamelCase ) + '''.gz''' , '''wb''' , compresslevel=6 ) as f_out:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
os.unlink(_UpperCamelCase )
# Settings
lowercase__ :Dict = HfArgumentParser(PreprocessingArguments)
lowercase__ :int = parser.parse_args()
if args.num_workers is None:
lowercase__ :Union[str, Any] = multiprocessing.cpu_count()
lowercase__ :Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
lowercase__ :Union[str, Any] = time.time()
lowercase__ :Union[str, Any] = load_dataset(args.dataset_name, split='train')
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
lowercase__ :Any = time.time()
lowercase__ :Union[str, Any] = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
lowercase__ :Any = set(ds.unique('hash'))
lowercase__ :int = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
lowercase__ :Tuple = time.time()
lowercase__ :str = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
lowercase__ :int = time.time()
lowercase__ :Tuple = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
lowercase__ :Tuple = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
lowercase__ :Union[str, Any] = output_dir / "data"
data_dir.mkdir(exist_ok=True)
lowercase__ :Any = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
lowercase__ :str = str(data_dir / f"""file-{file_number+1:012}.json""")
lowercase__ :Tuple = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""") | 522 | '''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ['''onnx''']
def __init__( self : List[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Tuple ) ->Union[str, Any]:
requires_backends(self , ['''onnx'''] )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Optional[Any] ) ->Any:
requires_backends(cls , ['''onnx'''] )
@classmethod
def lowerCAmelCase__ ( cls : Any , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : List[Any] ) ->Optional[Any]:
requires_backends(cls , ['''onnx'''] )
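# Note (added): ``DummyObject`` placeholders like this defer the failure to use
# time -- importing the module succeeds without ``onnx`` installed, and a clear
# missing-backend error is raised only when the class is instantiated or loaded.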
| 390 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__a: Optional[Any] = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: int = ["""MaskFormerFeatureExtractor"""]
__a: str = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Tuple = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
__a: Optional[int] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__a: str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
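    # Note (added): this is the standard lazy-import layout -- only
    # ``_import_structure`` is materialized up front, the heavy torch/vision
    # modules load on first attribute access, and the TYPE_CHECKING branch keeps
    # static type checkers aware of the real symbols.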
| 428 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a: Tuple = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: int = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__a: Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 428 | 1 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def __a ( A__ : list[list[float]] ):
SCREAMING_SNAKE_CASE = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(A__ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
SCREAMING_SNAKE_CASE = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creates a copy of the matrix with swapped positions of the elements
SCREAMING_SNAKE_CASE = [[0.0, 0.0], [0.0, 0.0]]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = matrix[1][1], matrix[0][0]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(A__ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
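    # Note (added): the branch above is the 2x2 closed form
    # A^-1 = (1/det) [[d, -b], [-c, a]] for A = [[a, b], [c, d]]; the 3x3 branch
    # below instead builds the cofactor matrix, transposes it into the adjugate,
    # and scales by 1/det.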
elif (
len(A__ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
SCREAMING_SNAKE_CASE = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creating cofactor matrix
SCREAMING_SNAKE_CASE = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
SCREAMING_SNAKE_CASE = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
SCREAMING_SNAKE_CASE = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
SCREAMING_SNAKE_CASE = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
SCREAMING_SNAKE_CASE = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
SCREAMING_SNAKE_CASE = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
SCREAMING_SNAKE_CASE = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
SCREAMING_SNAKE_CASE = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
SCREAMING_SNAKE_CASE = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
SCREAMING_SNAKE_CASE = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
SCREAMING_SNAKE_CASE = array(A__ )
for i in range(3 ):
for j in range(3 ):
SCREAMING_SNAKE_CASE = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
SCREAMING_SNAKE_CASE = array(A__ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(A__ )
# Calculate the inverse of the matrix
return [[float(d(A__ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("Please provide a matrix of size 2x2 or 3x3." ) | 16 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
snake_case_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__( self , *lowercase__ , **lowercase__ ):
"""simple docstring"""
warnings.warn(
"The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use ChineseCLIPImageProcessor instead." , lowercase__ , )
super().__init__(*lowercase__ , **lowercase__ )
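# Note (added): the shim above keeps the deprecated ``...FeatureExtractor`` import
# path working by subclassing the renamed image processor and warning on
# construction.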
| 421 | 0 |
import string
from math import logaa
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
snake_case_ = document.translate(
str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" )
snake_case_ = document_without_punctuation.split(" " ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
snake_case_ = corpus.lower().translate(
str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with ''
snake_case_ = corpus_without_punctuation.split("\n" )
snake_case_ = term.lower()
return (len([doc for doc in docs if term in doc] ), len(_A ))
def lowerCamelCase__ ( _A , _A , _A=False ):
'''simple docstring'''
if smoothing:
if n == 0:
raise ValueError("log10(0) is undefined." )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError("df must be > 0" )
elif n == 0:
raise ValueError("log10(0) is undefined." )
return round(logaa(n / df ) , 3 )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
return round(tf * idf , 3 )
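# Worked example (added): with tf = 2 and a corpus of 10 documents of which 2
# contain the term, idf = round(log10(10 / 2), 3) = 0.699, so the final score is
# round(2 * 0.699, 3) = 1.398.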
| 139 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCAmelCase :
'''simple docstring'''
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowercase__ : Any = namedtuple("CoinsDistribResult", "moves excess")
def lowerCamelCase__ ( _A ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(_A ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_A ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_A ) != count_coins(_A ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(_A ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
snake_case_ , snake_case_ = get_distrib(node.left )
snake_case_ , snake_case_ = get_distrib(node.right )
snake_case_ = 1 - left_distrib_excess
snake_case_ = 1 - right_distrib_excess
snake_case_ = (
left_distrib_moves
+ right_distrib_moves
+ abs(_A )
+ abs(_A )
)
snake_case_ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_A , _A )
return get_distrib(_A )[0]
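# Note (added): this solves the "distribute coins in a binary tree" problem --
# every subtree reports how many coins it must import or export across its parent
# edge, and the answer accumulates the absolute flow over all edges.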
if __name__ == "__main__":
import doctest
doctest.testmod()
| 139 | 1 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> Tuple:
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = len(set_a.intersection(__UpperCAmelCase ) )
if alternative_union:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) + len(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = len(set_a.union(__UpperCAmelCase ) )
return intersection / union
if isinstance(__UpperCAmelCase , (list, tuple) ) and isinstance(__UpperCAmelCase , (list, tuple) ):
__SCREAMING_SNAKE_CASE = [element for element in set_a if element in set_b]
if alternative_union:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) + len(__UpperCAmelCase )
return len(__UpperCAmelCase ) / union
else:
__SCREAMING_SNAKE_CASE = set_a + [element for element in set_b if element not in set_a]
return len(__UpperCAmelCase ) / len(__UpperCAmelCase )
return None
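# Worked example (added): for the demo sets below, the intersection is
# {"c", "d", "e"} (size 3) and the union has size 8, so the intended printed
# similarity is 3 / 8 = 0.375.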
if __name__ == "__main__":
a = {"a", "b", "c", "d", "e"}
a = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 109 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
SCREAMING_SNAKE_CASE__:str = HfArgumentParser(InitializationArguments)
SCREAMING_SNAKE_CASE__:List[str] = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
SCREAMING_SNAKE_CASE__:List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
SCREAMING_SNAKE_CASE__:Union[str, Any] = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
SCREAMING_SNAKE_CASE__:Optional[int] = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
SCREAMING_SNAKE_CASE__:Tuple = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 528 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
lowerCamelCase =logging.get_logger(__name__)
lowerCamelCase ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowerCamelCase ={
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 5_1_2,
"bert-large-uncased": 5_1_2,
"bert-base-cased": 5_1_2,
"bert-large-cased": 5_1_2,
"bert-base-multilingual-uncased": 5_1_2,
"bert-base-multilingual-cased": 5_1_2,
"bert-base-chinese": 5_1_2,
"bert-base-german-cased": 5_1_2,
"bert-large-uncased-whole-word-masking": 5_1_2,
"bert-large-cased-whole-word-masking": 5_1_2,
"bert-large-uncased-whole-word-masking-finetuned-squad": 5_1_2,
"bert-large-cased-whole-word-masking-finetuned-squad": 5_1_2,
"bert-base-cased-finetuned-mrpc": 5_1_2,
"bert-base-german-dbmdz-cased": 5_1_2,
"bert-base-german-dbmdz-uncased": 5_1_2,
"TurkuNLP/bert-base-finnish-cased-v1": 5_1_2,
"TurkuNLP/bert-base-finnish-uncased-v1": 5_1_2,
"wietsedv/bert-base-dutch-cased": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
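# Minimal usage sketch (downloads the checkpoint from the Hugging Face Hub on
# first use):
# tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
# print(tokenizer("Hello world!")["input_ids"])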
| 462 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    config_parameters_to_change = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
    key_parameters_to_change = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
lowerCamelCase ="" if has_file(args.repo_path, "config.json") else "unet"
with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
lowerCamelCase =reader.read()
lowerCamelCase =json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
lowerCamelCase =UNetaDModel(**config)
else:
lowerCamelCase =UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
lowerCamelCase =class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCamelCase =dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCamelCase =config[key]
del config[key]
lowerCamelCase =[k.replace("UNetRes", "") for k in config["down_block_types"]]
lowerCamelCase =[k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
lowerCamelCase =torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
lowerCamelCase ={}
for param_key, param_value in state_dict.items():
if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
continue
lowerCamelCase =False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(".")[0] == key:
lowerCamelCase =param_value
lowerCamelCase =True
if not has_changed:
lowerCamelCase =param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
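    # Example invocation (a sketch; the script filename and paths are
    # placeholders). Note that --dump_path is parsed but the converted files
    # are written back into --repo_path/<subfolder> above.
    # python rename_unet_checkpoint.py --repo_path ./old-unet --dump_path ./converted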
| 462 | 1 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0")
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r", shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results)
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results, )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    })
def answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device="cuda:0", )[0]
    return (answer, support_list)  # note: support_list is the module-level global set in the click handler below
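# End-to-end flow (a sketch using the names defined above; needs the GPU,
# index files, and models loaded at startup):
# question_doc, support_list = make_support("How do planes fly?", source="wiki40b", method="dense")
# answer, _ = answer_question(question_doc, sas_model, sas_tokenizer)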
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox('''Demo options''')
if demo_options:
    action_st = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
    show_passages = show_type == '''Show full text of passages'''
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
    retriever_info = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
    index_type = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
    wiki_source = '''wiki40b'''
    index_type = '''dense'''
sampled = '''beam'''
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('''Generation options''')
if generate_options:
    generate_info = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
    min_len = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
    max_len = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
        n_beams = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
        top_p = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
        temp = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
        n_beams = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('''Enter your question here:''', '''''')
else:
    question = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method='''dense''', n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
            wiki_url = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
            sec_titles = res[1].strip()
if sec_titles == "":
                sections = '''[{}]({})'''.format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(''' & ''')
                sections = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
        answers_st = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True) | 52 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = """ClapFeatureExtractor"""
    tokenizer_class = ("""RobertaTokenizer""", """RobertaTokenizerFast""")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs)
        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
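# Minimal usage sketch (assumes the public CLAP checkpoint is reachable):
# processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# inputs = processor(text=["a dog barking"], return_tensors="pt")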
| 432 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        '''simple docstring'''
        return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )
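# _long_tensor is the small helper used by the integration test below, e.g.:
# input_ids = _long_tensor([[101, 102]])  # shape (1, 2), dtype torch.long, on torch_device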
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
@slow
@unittest.skip('''Model is not available.''')
    def test_inference_no_head(self):
        '''simple docstring'''
        directory = '''nvidia/megatron-bert-uncased-345m'''
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ['''MYDIR'''], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = '''ii={} jj={} a={} b={}'''.format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
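    # The test above is decorated with @slow and @unittest.skip, so it only
    # runs once the skip is removed and slow tests are enabled, e.g. (a sketch):
    # RUN_SLOW=1 python -m pytest -k test_inference_no_head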
| 142 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path, pytorch_dump_folder_path):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    special_keys = ['''key_proj''', '''value_proj''', '''query_proj''']
    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
        attributes = key.split('''.''' )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"""{attribute} is initialized.""" )
                is_key_init = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"""{attribute} is initialized""" )
                is_key_init = True
break
            elif attribute in special_keys and hasattr(old_model, '''in_proj_weight''' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"""{old_model} does not have {old_attribute}""" )
                    old_model = getattr(old_model, old_attribute)
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
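    # Example invocation (a sketch; the script filename and paths are placeholders):
    # python convert_prophetnet_checkpoint.py \
    #     --prophetnet_checkpoint_path ./prophetnet_old --pytorch_dump_folder_path ./prophetnet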
| 142 | 1 |