code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    """Fetch a single Hacker News item by id from the Firebase API.

    :param story_id: numeric item id (int or str accepted by the URL).
    :return: the decoded JSON payload for the item.
    """
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    # timeout so a stalled API call cannot hang the script forever
    return requests.get(url, timeout=10).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Return the first ``max_stories`` top stories as item dicts."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url, timeout=10).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as a markdown bullet list of [title](url) links."""
    stories = hackernews_top_stories(max_stories)
    # each story dict carries at least "title" and "url" keys
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 4 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCamelCase__(Pipeline):
    """
    Zero-shot image classification pipeline: scores an image against free-form
    candidate text labels using a CLIP-style dual encoder.

    NOTE(review): identifiers in this block were machine-obfuscated (duplicate
    parameter names, all methods named ``__UpperCAmelCase``). Method and local
    names are restored from the standard ``Pipeline`` protocol
    (``_sanitize_parameters`` / ``preprocess`` / ``_forward`` / ``postprocess``)
    and from the imports actually used (``PIPELINE_INIT_ARGS``, ``Pipeline``,
    ``UserDict``, ``load_image``, ``stable_softmax``).
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, 'vision')
        # Restrict to model classes that emit image/text similarity logits.
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        """Classify ``images`` against ``candidate_labels`` passed as a kwarg."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Split caller kwargs into (preprocess, forward, postprocess) params.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        """Build model inputs: pixel features, the labels, and tokenized hypotheses."""
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        # One hypothesis sentence per candidate label.
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        """Turn per-label logits into a score-sorted list of {score, label} dicts."""
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            # A single label squeezes down to a scalar; normalize to a list.
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''')
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 4 | 1 |
'''simple docstring'''
def reverse_words(input_str: str) -> str:
    """
    Reverse the order of whitespace-separated words in ``input_str``.

    >>> reverse_words("I love Python")
    'Python love I'
    >>> reverse_words("I     Love          Python")
    'Python Love I'
    >>> reverse_words("")
    ''
    """
    # split() with no args collapses any run of whitespace
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 4 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """
    Print (in bits, rounded) the first-order entropy of ``text``, its
    second-order entropy, and the difference between the two, over the
    alphabet of lowercase letters plus space.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string: H1 = -sum(p * log2(p)) over single characters
    my_fir_sum = 0.0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f'''{round(-1 * my_fir_sum ):.1f}''')

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0.0
    # for each alpha (two in size) calculate entropy.
    # bugfix: inner loop must use a distinct variable, otherwise only
    # doubled characters (aa, bb, ...) were ever considered.
    for cha0 in my_alphas:
        for cha1 in my_alphas:
            sequence = cha0 + cha1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f'''{round(-1 * my_sec_sum ):.1f}''')

    # print the difference between them
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''')


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Count occurrences of single characters and of overlapping two-character
    sequences in ``text``.

    :return: (single-char Counter, two-char Counter).  The last character and
        a leading " "+first-char pair are counted explicitly so every
        character appears exactly once in the single-char counts.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    """Run the module doctests (sample text retained upstream is omitted)."""
    import doctest

    doctest.testmod()
    # calculate_prob(text) can be run on any sample paragraph to compare
    # first- and second-order entropies.


if __name__ == "__main__":
    main()
| 4 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( A , unittest.TestCase ):
    '''
    Fast CPU tests for `DanceDiffusionPipeline` built on tiny dummy components.

    NOTE(review): identifiers in this block look machine-obfuscated
    (`lowerCamelCase__`, base `A`, methods all named `__UpperCAmelCase`,
    locals bound to `_lowercase` but read back as `unet`, `pipe`, `audio`...).
    As written, later methods with the same name shadow earlier ones —
    presumably the original used distinct test names; confirm against the
    upstream diffusers test suite before relying on this code.
    '''

    # Pipeline class under test (consumed by PipelineTesterMixin machinery).
    A_ = DanceDiffusionPipeline
    A_ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    # Image-oriented optional call params that audio pipelines do not accept.
    A_ = PipelineTesterMixin.required_optional_params - {
        """callback""",
        """latents""",
        """callback_steps""",
        """output_type""",
        """num_images_per_prompt""",
    }
    A_ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    A_ = False
    A_ = False

    def __UpperCAmelCase ( self : Dict ) -> Tuple:
        '''Build dummy pipeline components: a tiny 1D UNet plus an IPNDM scheduler.'''
        torch.manual_seed(0 )
        # NOTE(review): `flip_sin_to_cos`/`use_timestep_embedding` receive the
        # undefined name `UpperCamelCase_` — presumably boolean flags originally.
        _lowercase : List[str] = UNetaDModel(
            block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=1_6000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=UpperCamelCase_ , use_timestep_embedding=UpperCamelCase_ , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
        _lowercase : List[str] = IPNDMScheduler()
        _lowercase : Tuple = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components

    def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any=0 ) -> List[Any]:
        '''Deterministic call kwargs (seeded generator) for the dummy pipeline.

        NOTE(review): duplicate parameter names are a SyntaxError as written —
        originally presumably (device, seed=0).
        '''
        # MPS does not support device-bound generators, so seed globally there.
        if str(UpperCamelCase_ ).startswith('mps' ):
            _lowercase : str = torch.manual_seed(UpperCamelCase_ )
        else:
            _lowercase : str = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
        _lowercase : Optional[int] = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 4,
        }
        return inputs

    def __UpperCAmelCase ( self : Any ) -> List[Any]:
        '''End-to-end dummy run: check output shape and a fixed audio slice.'''
        _lowercase : Union[str, Any] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        _lowercase : int = self.get_dummy_components()
        _lowercase : Optional[int] = DanceDiffusionPipeline(**UpperCamelCase_ )
        _lowercase : int = pipe.to(UpperCamelCase_ )
        pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        _lowercase : str = self.get_dummy_inputs(UpperCamelCase_ )
        _lowercase : Optional[Any] = pipe(**UpperCamelCase_ )
        _lowercase : Any = output.audios
        _lowercase : Dict = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        # reference values for the trailing audio slice
        _lowercase : Union[str, Any] = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2

    @skip_mps
    def __UpperCAmelCase ( self : Dict ) -> int:
        '''Delegate to the mixin's save/load test (skipped on MPS).'''
        return super().test_save_load_local()

    @skip_mps
    def __UpperCAmelCase ( self : int ) -> Any:
        '''Dict vs tuple outputs should agree within 3e-3 (skipped on MPS).'''
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )

    @skip_mps
    def __UpperCAmelCase ( self : str ) -> List[str]:
        '''Delegate to the mixin's optional-components save/load test (skipped on MPS).'''
        return super().test_save_load_optional_components()

    @skip_mps
    def __UpperCAmelCase ( self : int ) -> str:
        '''Delegate to the mixin's attention-slicing forward-pass test (skipped on MPS).'''
        return super().test_attention_slicing_forward_pass()

    def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
        '''Batched vs single inference should agree within 3e-3.'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
    '''
    Slow GPU integration tests for `DanceDiffusionPipeline` against the
    `harmonai/maestro-150k` checkpoint.

    NOTE(review): method names were obfuscated to `__UpperCAmelCase` (the first
    is presumably `tearDown`), and locals bound to `_lowercase` are read back
    under their presumed original names (`pipe`, `output`, `audio`...).
    Confirm against the upstream diffusers test suite before relying on this.
    '''

    def __UpperCAmelCase ( self : int ) -> Dict:
        '''Free GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCAmelCase ( self : Dict ) -> str:
        '''Full-precision generation should match a reference audio slice.'''
        _lowercase : int = torch_device
        _lowercase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
        _lowercase : Dict = pipe.to(UpperCamelCase_ )
        pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        _lowercase : int = torch.manual_seed(0 )
        _lowercase : Dict = pipe(generator=UpperCamelCase_ , num_inference_steps=100 , audio_length_in_s=4.0_96 )
        _lowercase : Union[str, Any] = output.audios
        _lowercase : Union[str, Any] = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        # reference values for the trailing audio slice (fp32)
        _lowercase : List[str] = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2

    def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        '''Half-precision generation should match its own reference slice.'''
        _lowercase : int = torch_device
        # `torch.floataa` is the obfuscated spelling of torch.float16 here —
        # this is the fp16 variant of the test; confirm against upstream.
        _lowercase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
        _lowercase : Union[str, Any] = pipe.to(UpperCamelCase_ )
        pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        _lowercase : Union[str, Any] = torch.manual_seed(0 )
        _lowercase : List[str] = pipe(generator=UpperCamelCase_ , num_inference_steps=100 , audio_length_in_s=4.0_96 )
        _lowercase : Any = output.audios
        _lowercase : Any = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        # reference values for the trailing audio slice (fp16)
        _lowercase : Dict = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 4 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow integration check: Flax XLM-RoBERTa base reproduces known hidden states."""

    @slow
    def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
        """Encode a fixed sentence and compare the last-dim slice of the output."""
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text )] )
        # batch_size, sequence_length, embedding_vector_dim
        expected_shape = (1, 12, 768)
        expected_tail = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids )['last_hidden_state']
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_tail , atol=1E-3 ) )
| 4 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    """
    Immutable-by-convention state for the Flax Karras-VE scheduler.

    Field names are restored from the in-file usages
    ``state.replace(num_inference_steps=..., schedule=..., timesteps=...)``
    and ``state.num_inference_steps``.
    """

    # number of diffusion steps, filled by set_timesteps
    num_inference_steps = None
    # descending integer timestep indices
    timesteps = None
    # noise levels matching `timesteps`
    schedule = None  # sigma(t_i)

    @classmethod
    def create(cls):
        """Return an empty state; fields are populated by ``set_timesteps``."""
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """
    Output of the Karras-VE scheduler's ``step``/``step_correct``.

    Field names are restored from the in-file keyword construction sites
    ``FlaxKarrasVeOutput(prev_sample=..., derivative=..., state=...)``.
    """

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: "KarrasVeSchedulerState"
class lowerCamelCase__(FlaxSchedulerMixin, ConfigMixin):
    """
    Flax Karras et al. (2022) variance-expanding stochastic scheduler.

    ``add_noise_to_input`` churns the sample up to sigma_hat, ``step`` takes a
    first-order (Euler) step, and ``step_correct`` applies the second-order
    correction.  NOTE(review): names were obfuscated upstream; methods and
    config parameters are restored from the in-file usages
    (``self.config.sigma_min``/``s_churn``/..., ``KarrasVeSchedulerState.create()``,
    ``FlaxKarrasVeOutput(...)``) per the diffusers Karras-VE Flax API.
    """

    @property
    def has_state(self) -> bool:
        # Mutable data lives in an external KarrasVeSchedulerState object.
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # Values are stored on `self.config` by @register_to_config.
        pass

    def create_state(self):
        """Return a fresh, empty scheduler state."""
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()):
        """Fill ``state`` with descending timesteps and the geometric sigma schedule."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        # Geometric interpolation from sigma_max down to sigma_min (squared form).
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key):
        """Churn step: raise sigma to sigma_hat and add matching gaussian noise.

        Returns (sample_hat, sigma_hat).
        """
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        """First-order (Euler) update from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True):
        """Second-order correction: average the derivatives at both sigma levels."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        # Not supported by this scheduler.
        raise NotImplementedError()
| 4 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger; the name `logger` is grounded by the `logger.warning`/
# `logger.info` calls in the config classes below.
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL.  NOTE(review): original constant name presumed
# to be INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP — confirm against upstream.
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class InstructBlipVisionConfig(PretrainedConfig):
    """
    Configuration for the InstructBLIP vision encoder (ViT-g/14-like tower).

    NOTE(review): the obfuscated source had duplicate parameter names (a
    SyntaxError); the signature is restored from the attribute assignments and
    the defaults' order.  The class name is grounded by the
    ``InstructBlipVisionConfig(**...)`` reference in ``InstructBlipConfig``.
    """

    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the vision sub-config, unwrapping a composite InstructBlip config if given."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    """
    Configuration for the InstructBLIP Q-Former (a BERT-like encoder that
    cross-attends into vision features every ``cross_attention_frequency`` layers).

    NOTE(review): the obfuscated source had duplicate parameter names (a
    SyntaxError); the signature is restored from the attribute assignments and
    defaults.  Class name grounded by ``InstructBlipQFormerConfig(**...)``
    in ``InstructBlipConfig``.
    """

    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        # Width of the vision features the Q-Former cross-attends into.
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the Q-Former sub-config, unwrapping a composite InstructBlip config if given."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    """
    Composite configuration tying together the InstructBLIP vision encoder,
    Q-Former, and language-model sub-configs.

    NOTE(review): names restored from the in-file references to
    ``InstructBlipVisionConfig``/``InstructBlipQFormerConfig`` and the
    imported ``CONFIG_MAPPING``/``MODEL_FOR_CAUSAL_LM_MAPPING_NAMES``.
    """

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        # The language model can be any registered architecture; default to OPT.
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # Q-Former cross-attends into the vision tower's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self):
        """Serialize including the nested sub-configs and the composite model_type."""
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: submodule name -> public names it provides.
# NOTE(review): the obfuscated source bound this dict (and the modeling list)
# to `_A`, leaving `_import_structure` undefined at the _LazyModule call below.
_import_structure = {
    '''configuration_informer''': [
        '''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''InformerConfig''',
    ],
}

# Modeling code needs torch; only register it when torch is importable.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_informer'''] = [
        '''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''InformerForPrediction''',
        '''InformerModel''',
        '''InformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Tensor framework used as `return_tensors` in the tests below; the name
# FRAMEWORK is grounded by the `if FRAMEWORK != "jax":` check further down.
# Preference order: torch, then tensorflow, falling back to jax.
if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = ByTaTokenizer
A_ = False
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
_lowercase : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
    def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer:
        '''Instantiate a tokenizer from the temp dir populated in setUp, forwarding kwargs.'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
    def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
        '''Build a (text, ids) pair of decodable ASCII tokens for round-trip tests.

        NOTE(review): duplicate parameter names are a SyntaxError as written —
        originally presumably (tokenizer, with_prefix_space=False,
        max_length=20, min_length=5), matching the names read in the body.
        '''
        _lowercase : Dict = []
        # collect ids whose single-token decode succeeds (skip invalid byte sequences)
        for i in range(len(UpperCamelCase_ ) ):
            try:
                _lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        # keep only ASCII-letter/space tokens that round-trip through encode
        _lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
        _lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
        if max_length is not None and len(UpperCamelCase_ ) > max_length:
            _lowercase : List[Any] = toks[:max_length]
        # pad up to min_length by doubling the token list
        if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
            while len(UpperCamelCase_ ) < min_length:
                _lowercase : Tuple = toks + toks
        # toks_str = [t[1] for t in toks]
        _lowercase : Dict = [t[0] for t in toks]
        # Ensure consistency
        _lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
        # force at least one space into the output text when possible
        if " " not in output_txt and len(UpperCamelCase_ ) > 1:
            _lowercase : Any = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
            )
        if with_prefix_space:
            _lowercase : Union[str, Any] = ' ' + output_txt
        _lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
        return output_txt, output_ids
    def __UpperCAmelCase ( self : Optional[int] ) -> int:
        '''Texts with an explicit trailing </s> should encode to the same ids as
        texts without one (the tokenizer appends EOS itself).'''
        _lowercase : List[str] = self.ta_base_tokenizer
        _lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
        _lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
        self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
    def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        '''Byte-level encode/decode round-trips for ASCII and multibyte characters.'''
        _lowercase : Optional[int] = self.ta_base_tokenizer
        _lowercase : Tuple = 'Unicode €.'
        _lowercase : List[Any] = tokenizer(UpperCamelCase_ )
        # expected token ids, with the trailing EOS id (1)
        _lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
        # decoding
        _lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
        _lowercase : Any = tokenizer('e è é ê ë' )
        # accented characters encode to multi-byte sequences
        _lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
        # decoding
        _lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
    def __UpperCAmelCase ( self : Tuple ) -> List[str]:
        '''Batched call with padding returns a BatchEncoding with fixed-shape tensors.'''
        _lowercase : List[Any] = self.ta_base_tokenizer
        _lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        _lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        _lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
        self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
        # jax arrays have no .numpy(); use .tolist() there
        if FRAMEWORK != "jax":
            _lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
        else:
            _lowercase : List[str] = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
        # both sequences pad to the longer one's length (37)
        self.assertEqual((2, 37) , batch.input_ids.shape )
        self.assertEqual((2, 37) , batch.attention_mask.shape )
    def __UpperCAmelCase ( self : Optional[int] ) -> str:
        '''Encoder-only call returns input_ids/attention_mask and no decoder fields.'''
        _lowercase : Union[str, Any] = self.ta_base_tokenizer
        _lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        _lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids' , UpperCamelCase_ )
        self.assertIn('attention_mask' , UpperCamelCase_ )
        self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
        self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
    def __UpperCAmelCase ( self : Any ) -> int:
        '''Targets tokenized with max_length=32 padding/truncation come out exactly 32 wide.

        NOTE(review): obfuscated renames left ``UpperCamelCase_``/``tokenizer``/``targets``
        unbound here — confirm against the upstream HF test.
        '''
        _lowercase : Tuple = self.ta_base_tokenizer
        _lowercase : Optional[Any] = [
            'Summary of the text.',
            'Another summary.',
        ]
        _lowercase : str = tokenizer(
            text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
        self.assertEqual(32 , targets['input_ids'].shape[1] )
    def __UpperCAmelCase ( self : Dict ) -> Tuple:
        '''Passing text plus text_target yields matching input_ids and labels.

        NOTE(review): obfuscated renames left ``UpperCamelCase_``/``tokenizer``/``batch``
        unbound here — confirm against the upstream HF test.
        '''
        _lowercase : str = self.ta_base_tokenizer
        _lowercase : str = ['A long paragraph for summarization. </s>']
        _lowercase : Optional[int] = ['Summary of the text. </s>']
        # Expected byte-level ids for the source and target strings respectively.
        # fmt: off
        _lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        _lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        _lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
        self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
    def __UpperCAmelCase ( self : List[str] ) -> int:
        '''Round-trip save_pretrained/from_pretrained, then again with added tokens and
        an overridden model_max_length, checking everything survives the reload.

        NOTE(review): obfuscated renames left ``UpperCamelCase_``, ``tokenizers``,
        ``additional_special_tokens`` and ``after_tokenizer`` unbound in places —
        confirm against the upstream HF test.
        '''
        _lowercase : Dict = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        _lowercase : List[Any] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                _lowercase : List[Any] = tempfile.mkdtemp()
                _lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
                _lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
                tokenizer.save_pretrained(UpperCamelCase_ )
                _lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
                _lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
                self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
                shutil.rmtree(UpperCamelCase_ )
        _lowercase : str = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                _lowercase : Dict = tempfile.mkdtemp()
                _lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'] )
                _lowercase : Optional[int] = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token' )
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
                _lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
                tokenizer.save_pretrained(UpperCamelCase_ )
                _lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
                _lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
                self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
                self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                # model_max_length can also be overridden at load time.
                _lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(UpperCamelCase_ )
    def __UpperCAmelCase ( self : List[str] ) -> Tuple:
        '''Verify additional_special_tokens are picked up from the saved JSON config files
        and can be overridden via the from_pretrained keyword.

        NOTE(review): obfuscated renames left ``UpperCamelCase_``, ``tokenizer_list``,
        ``added_tokens_extra_ids``, ``tokenizer_without_change_in_init`` and
        ``tokenizer`` unbound in places — confirm against the upstream HF test.
        '''
        _lowercase : List[Any] = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(UpperCamelCase_ )
                with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
                    _lowercase : int = json.load(UpperCamelCase_ )
                with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
                    _lowercase : Tuple = json.load(UpperCamelCase_ )
                # ByT5 reserves 125 sentinel tokens <extra_id_0> ... <extra_id_124>.
                _lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
                _lowercase : Any = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                _lowercase : int = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(UpperCamelCase_ , UpperCamelCase_ )
                with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(UpperCamelCase_ , UpperCamelCase_ )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                _lowercase : Optional[Any] = tokenizer_class.from_pretrained(
                    UpperCamelCase_ , )
                self.assertIn(
                    'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                _lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
                _lowercase : Tuple = tokenizer_class.from_pretrained(
                    UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
                self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
    def __UpperCAmelCase ( self : List[str] ) -> str:
        '''After a save/load round trip, decoding byte id 255 must yield the empty string.

        NOTE(review): obfuscated renames left ``UpperCamelCase_``, ``tokenizer_list``
        and ``tokenizer`` unbound in places — confirm against the upstream HF test.
        '''
        _lowercase : Union[str, Any] = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(UpperCamelCase_ )
                _lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
                self.assertTrue(tokenizer.decode([255] ) == '' )
    def __UpperCAmelCase ( self : Optional[int] ) -> int:
        '''Intentionally a no-op — presumably overrides an inherited common test that
        does not apply to this byte-level tokenizer; TODO confirm against upstream.'''
        pass
    def __UpperCAmelCase ( self : str ) -> Tuple:
        '''Intentionally a no-op — presumably overrides an inherited common test that
        does not apply to this byte-level tokenizer; TODO confirm against upstream.'''
        pass
    def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
        '''Intentionally a no-op — presumably overrides an inherited common test that
        does not apply to this byte-level tokenizer; TODO confirm against upstream.'''
        pass
    def __UpperCAmelCase ( self : List[Any] ) -> int:
        '''Intentionally a no-op — presumably overrides an inherited common test that
        does not apply to this byte-level tokenizer; TODO confirm against upstream.'''
        pass
    def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
        '''convert_tokens_to_string must return a plain str for byte-level tokens.

        NOTE(review): obfuscated renames left ``UpperCamelCase_`` unbound here —
        confirm against the upstream HF test.
        '''
        _lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                _lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
                _lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
                self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
    def __UpperCAmelCase ( self : List[Any] ) -> str:
        '''Exercise the special-token ``*_id`` setters and their round trip with the
        corresponding token attributes, including additional_special_tokens(_ids).

        NOTE(review): obfuscated renames left ``UpperCamelCase_``, ``attributes_list``,
        ``token_id_to_test_setters`` and ``token_to_test_setters`` unbound in places —
        confirm against the upstream HF test.
        '''
        _lowercase : Dict = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                _lowercase : Optional[int] = [
                    'bos_token',
                    'eos_token',
                    'unk_token',
                    'sep_token',
                    'pad_token',
                    'cls_token',
                    'mask_token',
                ]
                _lowercase : Optional[int] = 0
                _lowercase : int = tokenizer.convert_ids_to_tokens(
                    UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
                for attr in attributes_list:
                    setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
                    self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
                    self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
                    setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
                    self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
                    self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
                setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] )
                self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] )
                self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] )
                setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
                self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
                self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 4 | 1 |
'''simple docstring'''
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase_ : list ) -> None:
'''simple docstring'''
_lowercase : Any = set_counts
_lowercase : Optional[int] = max(UpperCamelCase_ )
_lowercase : Any = len(UpperCamelCase_ )
_lowercase : Dict = [1] * num_sets
_lowercase : int = list(range(UpperCamelCase_ ) )
def __UpperCAmelCase ( self : str , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> bool:
'''simple docstring'''
_lowercase : str = self.get_parent(UpperCamelCase_ )
_lowercase : List[str] = self.get_parent(UpperCamelCase_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
_lowercase : Optional[Any] = 0
_lowercase : str = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
_lowercase : Optional[Any] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
_lowercase : int = 0
_lowercase : int = src_parent
_lowercase : Any = self.set_counts[src_parent]
_lowercase : Any = max(self.max_set , UpperCamelCase_ )
return True
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> int:
'''simple docstring'''
if self.parents[disj_set] == disj_set:
return disj_set
_lowercase : Tuple = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 4 |
'''simple docstring'''
# Bootstrap cell injected at the top of auto-generated documentation notebooks:
# installs `transformers` and `datasets` (with a commented-out from-source variant).
_A : Dict ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
# Cells prepended to every converted notebook.
# NOTE(review): obfuscation collapsed three distinct constants to the single name
# ``_A``, and ``INSTALL_CONTENT`` below is unbound as a result — the original
# likely named the first constant INSTALL_CONTENT; confirm against upstream.
_A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
# Placeholder -> concrete-name substitutions applied to doc templates.
_A : Dict ={
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowerCamelCase__ :
    '''Builds a tiny LayoutLM configuration plus random inputs and runs shape
    checks for each TFLayoutLM task head.

    NOTE(review): machine obfuscation duplicated the keyword name
    ``UpperCamelCase_`` across the ``__init__`` signature (a SyntaxError) and
    replaced assignment targets with ``_lowercase``, leaving many right-hand
    names (``parent``, ``batch_size``, ``bbox``, ``model``, ``result``, ...)
    unbound — confirm against the upstream HF test before executing.
    '''
    def __init__( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple=13 , UpperCamelCase_ : int=7 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : int=True , UpperCamelCase_ : int=True , UpperCamelCase_ : int=99 , UpperCamelCase_ : Optional[Any]=32 , UpperCamelCase_ : int=2 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : int=37 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Tuple=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Union[str, Any]=2 , UpperCamelCase_ : Dict=0.02 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Tuple=4 , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=1000 , ) -> str:
        '''Store the hyper-parameters used to build the tiny test model.'''
        _lowercase : Optional[int] = parent
        _lowercase : List[Any] = batch_size
        _lowercase : Optional[int] = seq_length
        _lowercase : Any = is_training
        _lowercase : Union[str, Any] = use_input_mask
        _lowercase : Dict = use_token_type_ids
        _lowercase : Optional[Any] = use_labels
        _lowercase : str = vocab_size
        _lowercase : Optional[Any] = hidden_size
        _lowercase : Dict = num_hidden_layers
        _lowercase : int = num_attention_heads
        _lowercase : Optional[Any] = intermediate_size
        _lowercase : int = hidden_act
        _lowercase : str = hidden_dropout_prob
        _lowercase : int = attention_probs_dropout_prob
        _lowercase : int = max_position_embeddings
        _lowercase : int = type_vocab_size
        _lowercase : int = type_sequence_label_size
        _lowercase : str = initializer_range
        _lowercase : List[Any] = num_labels
        _lowercase : Optional[int] = num_choices
        _lowercase : Union[str, Any] = scope
        _lowercase : List[Any] = range_bbox
    def __UpperCAmelCase ( self : str ) -> Dict:
        '''Create a config plus random input_ids, a legalised bbox tensor, masks and labels.'''
        _lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # convert bbox to numpy since TF does not support item assignment
        _lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                # Swap coordinates so that (x0, y0) <= (x1, y1) for every box.
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    _lowercase : Dict = bbox[i, j, 3]
                    _lowercase : Optional[int] = bbox[i, j, 1]
                    _lowercase : Tuple = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    _lowercase : Tuple = bbox[i, j, 2]
                    _lowercase : Tuple = bbox[i, j, 0]
                    _lowercase : Union[str, Any] = t
        _lowercase : Union[str, Any] = tf.convert_to_tensor(UpperCamelCase_ )
        _lowercase : List[str] = None
        if self.use_input_mask:
            _lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        _lowercase : Tuple = None
        if self.use_token_type_ids:
            _lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowercase : Any = None
        _lowercase : Any = None
        _lowercase : Dict = None
        if self.use_labels:
            _lowercase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
        _lowercase : Dict = LayoutLMConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] ) -> Any:
        '''Run the base model three ways (full/partial kwargs) and check output shapes.'''
        _lowercase : Any = TFLayoutLMModel(config=UpperCamelCase_ )
        _lowercase : Dict = model(UpperCamelCase_ , UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
        _lowercase : Any = model(UpperCamelCase_ , UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
        _lowercase : List[Any] = model(UpperCamelCase_ , UpperCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] ) -> Optional[Any]:
        '''Check the masked-LM head produces (batch, seq, vocab) logits.'''
        _lowercase : List[Any] = TFLayoutLMForMaskedLM(config=UpperCamelCase_ )
        _lowercase : Dict = model(UpperCamelCase_ , UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any] ) -> Dict:
        '''Check the sequence-classification head produces (batch, num_labels) logits.'''
        _lowercase : Optional[int] = self.num_labels
        _lowercase : Optional[int] = TFLayoutLMForSequenceClassification(config=UpperCamelCase_ )
        _lowercase : str = model(UpperCamelCase_ , UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def __UpperCAmelCase ( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Optional[int]:
        '''Check the token-classification head produces (batch, seq, num_labels) logits.'''
        _lowercase : Union[str, Any] = self.num_labels
        _lowercase : Any = TFLayoutLMForTokenClassification(config=UpperCamelCase_ )
        _lowercase : Any = model(UpperCamelCase_ , UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : str ) -> Union[str, Any]:
        '''Check the QA head produces (batch, seq) start and end logits.'''
        _lowercase : List[Any] = TFLayoutLMForQuestionAnswering(config=UpperCamelCase_ )
        _lowercase : int = model(UpperCamelCase_ , UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def __UpperCAmelCase ( self : List[str] ) -> int:
        '''Return (config, inputs_dict) in the shape the common model tests expect.'''
        _lowercase : Any = self.prepare_config_and_inputs()
        (
            (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) ,
        ) : Union[str, Any] = config_and_inputs
        _lowercase : Union[str, Any] = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class lowerCamelCase__ ( A , A , unittest.TestCase ):
    '''Wires the TFLayoutLM models into the shared model/pipeline test mixins.

    NOTE(review): obfuscation collapsed several distinct class attributes to the
    single name ``A_`` (later assignments shadow earlier ones) and renamed the
    mixin bases to ``A`` — confirm against the upstream HF test.
    '''
    # Model classes exercised by the common tests (empty when TF is missing).
    A_ = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline task -> model class mapping for the pipeline tests.
    A_ = (
        {
            """feature-extraction""": TFLayoutLMModel,
            """fill-mask""": TFLayoutLMForMaskedLM,
            """text-classification""": TFLayoutLMForSequenceClassification,
            """token-classification""": TFLayoutLMForTokenClassification,
            """zero-shot""": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    A_ = False
    A_ = True
    A_ = 10
    def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        '''Create the model tester and the config tester used by the cases below.'''
        _lowercase : Any = TFLayoutLMModelTester(self )
        _lowercase : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
    def __UpperCAmelCase ( self : Optional[Any] ) -> int:
        '''Run the shared configuration sanity checks.'''
        self.config_tester.run_common_tests()
    def __UpperCAmelCase ( self : Any ) -> List[Any]:
        '''Shape-check the base model.'''
        _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase_ )
    def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
        '''Shape-check the masked-LM head.'''
        _lowercase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )
    def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        '''Shape-check the sequence-classification head.'''
        _lowercase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ )
    def __UpperCAmelCase ( self : Optional[int] ) -> int:
        '''Shape-check the token-classification head.'''
        _lowercase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ )
    def __UpperCAmelCase ( self : int ) -> int:
        '''Shape-check the question-answering head.'''
        _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ )
    @slow
    def __UpperCAmelCase ( self : str ) -> Dict:
        '''Download the first pretrained checkpoint and check it loads.'''
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowercase : Dict = TFLayoutLMModel.from_pretrained(UpperCamelCase_ )
            self.assertIsNotNone(UpperCamelCase_ )
    @unittest.skip('Onnx compliancy broke with TF 2.10' )
    def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
        '''Skipped: ONNX export is incompatible with TF 2.10 (see decorator).'''
        pass
def __UpperCamelCase ( ) -> str:
    '''Build a fixed 2-sequence LayoutLM batch (ids, mask, bboxes, segment ids, labels).

    NOTE(review): obfuscated renames assign the tensors to ``_lowercase``, so the
    names in the return statement are unbound — the original bound one name per
    tensor; confirm against the upstream HF test.
    '''
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    _lowercase : Dict = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] )  # noqa: E231
    _lowercase : Union[str, Any] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    _lowercase : Optional[Any] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] )  # noqa: E231
    _lowercase : Any = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    _lowercase : Optional[Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] )  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
    '''Slow integration tests against the pretrained microsoft/layoutlm-base-uncased
    checkpoint, comparing outputs and shapes to reference values.

    NOTE(review): obfuscated renames left ``UpperCamelCase_``, ``model``, ``outputs``,
    ``loss`` and ``logits`` unbound in places — confirm against the upstream HF test.
    '''
    @slow
    def __UpperCAmelCase ( self : Any ) -> Any:
        '''Base model: hidden states and pooled output match reference slices.'''
        _lowercase : Optional[int] = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : str = prepare_layoutlm_batch_inputs()
        # forward pass
        _lowercase : Optional[int] = model(input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
        # test the sequence output on [0, :3, :3]
        _lowercase : Dict = tf.convert_to_tensor(
            [[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=1E-3 ) )
        # test the pooled output on [1, :3]
        _lowercase : str = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] )
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , UpperCamelCase_ , atol=1E-3 ) )
    @slow
    def __UpperCAmelCase ( self : List[Any] ) -> int:
        '''Sequence classification: loss is per-example and logits are (2, 2).'''
        _lowercase : int = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = prepare_layoutlm_batch_inputs()
        # forward pass
        _lowercase : List[Any] = model(
            input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=tf.convert_to_tensor([1, 1] ) , )
        # test whether we get a loss as a scalar
        _lowercase : List[str] = outputs.loss
        _lowercase : Any = (2,)
        self.assertEqual(loss.shape , UpperCamelCase_ )
        # test the shape of the logits
        _lowercase : List[str] = outputs.logits
        _lowercase : Optional[int] = (2, 2)
        self.assertEqual(logits.shape , UpperCamelCase_ )
    @slow
    def __UpperCAmelCase ( self : Any ) -> Any:
        '''Token classification: logits are (batch=2, seq=25, num_labels=13).'''
        _lowercase : List[str] = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 )
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[str] = prepare_layoutlm_batch_inputs()
        # forward pass
        _lowercase : Any = model(
            input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
        # test the shape of the logits
        _lowercase : Union[str, Any] = outputs.logits
        _lowercase : Any = tf.convert_to_tensor((2, 25, 13) )
        self.assertEqual(logits.shape , UpperCamelCase_ )
    @slow
    def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
        '''Question answering: start/end logits are (batch=2, seq=25).'''
        _lowercase : str = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = prepare_layoutlm_batch_inputs()
        # forward pass
        _lowercase : Optional[Any] = model(input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
        # test the shape of the logits
        _lowercase : Union[str, Any] = tf.convert_to_tensor((2, 25) )
        self.assertEqual(outputs.start_logits.shape , UpperCamelCase_ )
        self.assertEqual(outputs.end_logits.shape , UpperCamelCase_ )
| 4 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __UpperCamelCase ( _lowercase ) -> Tuple:
_lowercase : int = torch.exp(_lowercase )
_lowercase : List[str] = torch.sum(_lowercase, dim=1 ) # sum of exp(x_i)
_lowercase : str = torch.sum(x * exp_x, dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowercase ) - B / A
class lowerCamelCase__ ( nn.Module ):
    '''DeeBERT encoder: a stack of BertLayers where each layer feeds a "highway"
    early-exit head; at inference, a sufficiently confident (low-entropy) highway
    raises HighwayException to stop computation early.

    NOTE(review): obfuscated renames leave several right-hand names unbound
    (``config``, ``x``, ``pooler``, ``loaded_model``, ``hidden_states``, ...) and
    ``BertHighway``/``HighwayException`` are defined elsewhere in the original
    file — confirm against the upstream DeeBERT source before executing.
    '''
    def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] ) -> Optional[Any]:
        '''Build per-layer BertLayer and highway stacks; exit thresholds default to -1
        (entropy is never below -1, so early exit is disabled by default).'''
        super().__init__()
        _lowercase : int = config.output_attentions
        _lowercase : int = config.output_hidden_states
        _lowercase : Union[str, Any] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
        _lowercase : List[Any] = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
        _lowercase : Tuple = [-1 for _ in range(config.num_hidden_layers )]
    def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str ) -> int:
        '''Set the per-layer early-exit entropy threshold(s); a scalar is broadcast
        to every layer, otherwise a per-layer sequence is stored as-is.'''
        if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                _lowercase : Optional[Any] = x
        else:
            _lowercase : Optional[int] = x
    def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[Any] ) -> Dict:
        '''Copy the main BertPooler weights into every highway's pooler.'''
        _lowercase : Optional[int] = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
    def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]:
        '''Forward pass through all layers, evaluating each highway head; during
        inference a low-entropy highway aborts via HighwayException.'''
        _lowercase : int = ()
        _lowercase : List[Any] = ()
        _lowercase : Tuple = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                _lowercase : Optional[int] = all_hidden_states + (hidden_states,)
            _lowercase : str = layer_module(
                UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ )
            _lowercase : List[str] = layer_outputs[0]
            if self.output_attentions:
                _lowercase : Tuple = all_attentions + (layer_outputs[1],)
            _lowercase : Optional[int] = (hidden_states,)
            if self.output_hidden_states:
                _lowercase : str = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                _lowercase : Optional[int] = current_outputs + (all_attentions,)
            _lowercase : List[Any] = self.highway[i](UpperCamelCase_ )
            # logits, pooled_output
            if not self.training:
                # Inference: measure highway confidence and maybe exit early.
                _lowercase : Dict = highway_exit[0]
                _lowercase : Tuple = entropy(UpperCamelCase_ )
                _lowercase : Dict = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                _lowercase : str = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    # Confident enough: abort the forward pass with the layer index.
                    _lowercase : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(UpperCamelCase_ , i + 1 )
            else:
                _lowercase : Optional[int] = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            _lowercase : str = all_hidden_states + (hidden_states,)
        _lowercase : Optional[Any] = (hidden_states,)
        if self.output_hidden_states:
            _lowercase : Dict = outputs + (all_hidden_states,)
        if self.output_attentions:
            _lowercase : Optional[Any] = outputs + (all_attentions,)
        _lowercase : Optional[int] = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    """The Bert Model transformer with early exiting (DeeBERT). """ , A , )
class lowerCamelCase__ ( A ):
    """
    BERT backbone with DeeBERT early exits: embeddings -> DeeBertEncoder
    (which may raise HighwayException at an off-ramp) -> pooler.
    """

    def __init__(self, config):
        super().__init__(config)
        # The original mangled code bound these to locals; they must be
        # instance attributes because forward()/init_highway_pooler() read them.
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        # Share the trained main pooler with every highway off-ramp.
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model.

        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}.
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)  # NOTE(review): docstring constant inferred — confirm against imports
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        """Standard BERT forward; returns (sequence_output, pooled_output, ...extras, highway exits)."""
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds' )

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )

        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowerCamelCase__ ( A ):
    """Raised inside the DeeBERT encoder to stop computation at an early-exit
    (highway) layer.

    `message` carries the highway head's outputs; `exit_layer` is 1-based.
    """

    def __init__(self, message, exit_layer):
        # Callers read these back as e.message / e.exit_layer (see the
        # sequence-classification head), so they must live on the instance.
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class lowerCamelCase__ ( nn.Module ):
    """A highway (early-exit) head: pools one intermediate layer's hidden
    states and classifies them, mirroring the final pooler + classifier."""

    def __init__(self, config):
        super().__init__()
        # Must be attributes: forward() reads self.pooler/dropout/classifier.
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        """Return (logits, pooled_output) for the partial encoder outputs."""
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """ , A , )
class lowerCamelCase__ ( A ):
    """DeeBERT sequence classifier: accumulates per-highway losses during
    training and reports per-exit entropies at inference time."""

    def __init__(self, config):
        super().__init__(config)
        # Attributes (the mangled original bound these to locals).
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)  # NOTE(review): docstring constant inferred — confirm against imports
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        """Returns ((loss,) logits, (hidden_states), (attentions), (highway_exits))."""
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An off-ramp fired: e.message carries the partial outputs.
            outputs = e.message
            exit_layer = e.exit_layer  # 1-based index of the exit that fired
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 4 | 1 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
# Issues carrying any of these labels (lower-cased) are never auto-closed
# or auto-nagged by the stale bot below, which reads this name.
LABELS_TO_EXEMPT = [
    'good first issue',
    'good second issue',
    'good difficult issue',
    'feature request',
    'new model',
    'wip',
]
def main() -> None:
    """Close or warn stale issues in huggingface/transformers.

    Intended to run under GitHub Actions with GITHUB_TOKEN set. An issue is
    closed if the bot already warned it and it stayed inactive; it gets the
    stale warning after 23 days of inactivity. Exempt labels are skipped.
    """
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/transformers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        # Newest comment first (the mangled original's lambda referenced an
        # undefined name and a bogus `reverse=` value).
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Already warned by the bot and still inactive -> close.
            issue.edit(state='closed' )
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Inactive long enough -> leave the stale warning comment.
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.' )


if __name__ == "__main__":
    main()
| 4 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int ) -> Any:
'''simple docstring'''
_lowercase : List[Any] = [10, 20, 30, 40, 50, 60]
_lowercase : Tuple = [2, 4, 6, 8, 10, 12]
_lowercase : Optional[Any] = 100
self.assertEqual(kp.calc_profit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 210 )
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'Weight can not be negative.' )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'Profit can not be negative.' )
def __UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
self.assertRaisesRegex(
UpperCamelCase_ , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 4 | 1 |
'''simple docstring'''
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Simulate a quantum half adder for two classical input bits.

    Returns the measurement histogram over (carry, sum) bits from 1000 shots
    on the aer_simulator. The mangled original declared both parameters with
    the same name (a SyntaxError) and tested the same bit twice.
    """
    simulator = qiskit.Aer.get_backend('aer_simulator' )

    qc_ha = qiskit.QuantumCircuit(4, 2 )
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0 )
    if bit1 == 1:
        qc_ha.x(1 )
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2 )
    qc_ha.cx(1, 2 )

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3 )
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0 )  # extract XOR value
    qc_ha.measure(3, 1 )  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000 )

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    # Demo: 1 + 1 -> sum 0, carry 1. Bind the result to the name the
    # f-string below actually reads (the mangled original printed an
    # undefined `counts`).
    counts = half_adder(1, 1)
    print(F'''Half Adder Output Qubit Counts: {counts}''')
| 4 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public names it exports.
# The mangled original collapsed every entry into a clobbering `_A`
# assignment, leaving `_import_structure` (read by _LazyModule below) undefined.
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece's word-initial marker character.
SPIECE_UNDERLINE = "▁"

# The tokenizer class below reads these module-level names; the mangled
# original collapsed them all into clobbering `_A` assignments.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on
class lowerCamelCase__ ( A ):
    """SentencePiece-based MBart-50 tokenizer.

    Sequences are wrapped as `[src_lang_code] tokens [eos]`. Reconstructed
    from the mangled original, which had duplicate parameter names (a
    SyntaxError), clobbering `A_` class attributes, and a `@src_lang.setter`
    on an undefined property.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Every language code is registered as an additional special token.
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        # The SentencePiece processor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs as `[src_lang_code] X [eos]`."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by translation pipelines: tokenize and record the target-language BOS id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset special tokens to the source lang: prefix=[src_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset special tokens to the target lang: prefix=[tgt_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# In the mangled original both values were bound to `_A`, so the logger was
# immediately clobbered by the archive map.
MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
    'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class lowerCamelCase__ ( A ):
    """Configuration for MarkupLM models.

    BERT-style text configuration plus XPath embedding sizes for markup
    structure. Reconstructed from the mangled original, whose __init__
    declared every parameter with the same name (a SyntaxError) and bound
    the values to locals instead of instance attributes.
    """

    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties (XPath / markup structure)
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
    """Unit test for tf_top_k_top_p_filtering.

    Restored from the mangled original: the method was not named test_*
    (so it was never discovered), used undefined locals, and referenced the
    nonexistent dtypes tf.floataa / tf.intaa.
    """

    def test_top_k_top_p_filtering(self):
        """top_k=10 / top_p=0.6 / min_tokens_to_keep=4 must keep exactly the 5 expected logits per row."""
        logits = tf.convert_to_tensor(
            [
                [
                    8.2_22_09_91,  # 3rd highest value; idx. 0
                    -0.5_62_00_44,
                    5.23_22_97_52,
                    4.0_38_63_93,
                    -6.8_79_83_78,
                    -0.54_78_58_02,
                    -3.2_01_21_53,
                    2.92_77_71_76,
                    1.88_17_19_53,
                    7.35_34_12_76,  # 5th highest value; idx. 9
                    8.43_20_78_33,  # 2nd highest value; idx. 10
                    -9.85_71_18_36,
                    -5.96_20_92_36,
                    -1.13_03_91_61,
                    -7.1_11_52_94,
                    -0.8_36_96_33,
                    -5.3_18_64_08,
                    7.06_42_74_07,
                    0.81_36_93_44,
                    -0.82_02_38_17,
                    -5.9_17_97_96,
                    0.58_81_34_43,
                    -6.99_77_84_38,
                    4.71_55_11_89,
                    -0.18_77_16_37,
                    7.44_02_07_59,  # 4th highest value; idx. 25
                    9.38_45_09_87,  # 1st highest value; idx. 26
                    2.12_66_29_41,
                    -9.32_56_20_38,
                    2.35_65_25_22,
                ],  # cummulative prob of 5 highest values <= 0.6
                [
                    0.58_42_55_18,
                    4.53_13_92_38,
                    -5.57_51_04_64,
                    -6.28_03_06_99,
                    -7.19_52_95_03,
                    -4.02_12_25_51,
                    1.39_33_70_37,
                    -6.06_70_70_57,
                    1.59_48_05_17,
                    -9.64_31_19,
                    0.03_90_77_99,
                    0.67_23_17_62,
                    -8.88_20_67_26,
                    6.27_11_59_22,  # 4th highest value; idx. 13
                    2.28_52_07_23,
                    4.82_76_75_06,
                    4.30_42_13_68,
                    8.8_27_53_13,  # 2nd highest value; idx. 17
                    5.44_02_99_58,  # 5th highest value; idx. 18
                    -4.4_73_57_94,
                    7.38_57_95_36,  # 3rd highest value; idx. 20
                    -2.91_05_16_63,
                    2.61_94_60_77,
                    -2.5_67_47_62,
                    -9.48_95_93_02,
                    -4.02_92_26_45,
                    -1.35_41_69_18,
                    9.67_70_23_23,  # 1st highest value; idx. 27
                    -5.89_47_85_53,
                    1.85_37_04_67,
                ],  # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float('inf')]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float('inf'), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class lowerCamelCase__ ( unittest.TestCase , A ):
    '''TF-specific generation integration tests: SavedModel export of
    `generate()` under `tf.function`, a TF-text end-to-end Keras pipeline,
    EOS-token handling, and model-kwarg forwarding/validation.

    NOTE(review): throughout this class, assignments were rewritten to the
    throwaway local `_lowercase` while later reads still use the original
    variable names (`model`, `input_length`, `dummy_model`, `serving_func`,
    `test_model`, `expectation`, ...) -- those reads are unresolved as
    written; restore the intended assignment targets before running.
    '''
    if is_tf_available():
        # Framework-specific fixture table consumed by the shared mixin `A`.
        A_ = {
            """AutoModelForCausalLM""": TFAutoModelForCausalLM,
            """AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
            """AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
            """AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
            """LogitsProcessorList""": TFLogitsProcessorList,
            """MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
            """create_tensor_fn""": tf.convert_to_tensor,
            """floats_tensor""": floats_tensor,
            """return_tensors""": """tf""",
        }
    @slow
    def __UpperCAmelCase ( self : str ) -> int:
        '''Export generate() as a SavedModel serving signature with a fixed
        input length and check the reloaded signature matches eager
        generation for every batch size prefix.'''
        _lowercase : Optional[int] = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        _lowercase : Dict = 2
        _lowercase : Tuple = 2
        class lowerCamelCase__ ( tf.Module ):
            '''Wrapper module exposing generate() as an exportable tf.function.'''
            def __init__( self : Dict , UpperCamelCase_ : int ) -> Union[str, Any]:
                '''simple docstring'''
                super(UpperCamelCase_ , self ).__init__()
                _lowercase : List[str] = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids' ),
                    tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask' ),
                ) , jit_compile=UpperCamelCase_ , )
            def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] ) -> List[Any]:
                '''Serving function: fixed input length, variable batch size.'''
                _lowercase : List[Any] = self.model.generate(
                    input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , max_new_tokens=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , )
                return {"sequences": outputs["sequences"]}
        _lowercase : int = [[2, 0], [102, 103]]
        _lowercase : int = [[1, 0], [1, 1]]
        _lowercase : str = DummyModel(model=UpperCamelCase_ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(UpperCamelCase_ , UpperCamelCase_ , signatures={'serving_default': dummy_model.serving} )
            _lowercase : int = tf.saved_model.load(UpperCamelCase_ ).signatures['serving_default']
            for batch_size in range(1 , len(UpperCamelCase_ ) + 1 ):
                _lowercase : List[str] = {
                    'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
                    'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
                }
                _lowercase : Optional[int] = serving_func(**UpperCamelCase_ )['sequences']
                _lowercase : List[str] = test_model.generate(**UpperCamelCase_ , max_new_tokens=UpperCamelCase_ )
                tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ )
    @slow
    def __UpperCAmelCase ( self : int ) -> List[str]:
        '''Mirror of the test above with a fixed batch size and variable
        input length in the exported signature.'''
        _lowercase : List[Any] = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        _lowercase : Any = 1
        _lowercase : List[str] = 2
        class lowerCamelCase__ ( tf.Module ):
            '''Wrapper module exposing generate() as an exportable tf.function.'''
            def __init__( self : str , UpperCamelCase_ : Tuple ) -> List[str]:
                '''simple docstring'''
                super(UpperCamelCase_ , self ).__init__()
                _lowercase : List[str] = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids' ),
                    tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask' ),
                ) , jit_compile=UpperCamelCase_ , )
            def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] ) -> Any:
                '''Serving function: fixed batch size, variable input length.'''
                _lowercase : List[Any] = self.model.generate(
                    input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , max_new_tokens=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , )
                return {"sequences": outputs["sequences"]}
        _lowercase : int = [[2], [102, 103]]
        _lowercase : Optional[Any] = [[1], [1, 1]]
        _lowercase : Tuple = DummyModel(model=UpperCamelCase_ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(UpperCamelCase_ , UpperCamelCase_ , signatures={'serving_default': dummy_model.serving} )
            _lowercase : Optional[Any] = tf.saved_model.load(UpperCamelCase_ ).signatures['serving_default']
            for input_row in range(len(UpperCamelCase_ ) ):
                _lowercase : int = {
                    'input_ids': tf.constant([dummy_input_ids[input_row]] ),
                    'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
                }
                _lowercase : Any = serving_func(**UpperCamelCase_ )['sequences']
                _lowercase : int = test_model.generate(**UpperCamelCase_ , max_new_tokens=UpperCamelCase_ )
                tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ )
    @slow
    @require_tensorflow_text
    def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        '''End-to-end Keras pipeline: TF-text sentencepiece tokenization ->
        generate() -> detokenize, saved as a whole Keras model.'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=UpperCamelCase_ )
            class lowerCamelCase__ ( tf.keras.layers.Layer ):
                '''Layer bundling tokenizer + tiny T5 + detokenizer.'''
                def __init__( self : Optional[int] ) -> List[Any]:
                    '''simple docstring'''
                    super().__init__()
                    _lowercase : List[Any] = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(UpperCamelCase_ , 'spiece.model' ) , 'rb' ).read() )
                    _lowercase : int = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )
                def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Any , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : int ) -> str:
                    '''Tokenize, pad, generate, and detokenize a string tensor.'''
                    _lowercase : Dict = self.tokenizer.tokenize(UpperCamelCase_ )
                    _lowercase , _lowercase : Any = text.pad_model_inputs(
                        UpperCamelCase_ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
                    _lowercase : Union[str, Any] = self.model.generate(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
                    return self.tokenizer.detokenize(UpperCamelCase_ )
            _lowercase : Optional[int] = CompleteSentenceTransformer()
            _lowercase : Dict = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' )
            _lowercase : Optional[int] = complete_model(UpperCamelCase_ )
            _lowercase : Dict = tf.keras.Model(UpperCamelCase_ , UpperCamelCase_ )
            keras_model.save(UpperCamelCase_ )
    def __UpperCAmelCase ( self : int ) -> List[Any]:
        '''Sampled generation must stop exactly at the custom EOS token id,
        for both a single id and a list of ids.'''
        _lowercase : Any = {
            'do_sample': True,
            'num_beams': 1,
            'top_p': 0.7,
            'top_k': 10,
            'temperature': 0.7,
        }
        _lowercase : List[Any] = 14
        _lowercase : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        _lowercase : int = 'Hello, my dog is cute and'
        _lowercase : str = tokenizer(UpperCamelCase_ , return_tensors='tf' )
        _lowercase : List[Any] = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        _lowercase : List[Any] = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(':/CPU:0' ):
            tf.random.set_seed(0 )
            _lowercase : Tuple = model.generate(**UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
        _lowercase : Union[str, Any] = [638, 198]
        with tf.device(':/CPU:0' ):
            tf.random.set_seed(0 )
            _lowercase : Tuple = model.generate(**UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
    def __UpperCAmelCase ( self : Any ) -> List[Any]:
        '''Unused model kwargs are ignored when call() filters them, but must
        raise when an encoder accepting **kwargs disables the filtering.'''
        _lowercase : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
        _lowercase : List[str] = 'Hugging Face is a technology company based in New York and Paris.'
        _lowercase : List[Any] = bart_tokenizer(UpperCamelCase_ , return_tensors='tf' ).input_ids
        _lowercase : Any = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
        _lowercase : List[str] = bart_model.generate(UpperCamelCase_ ).numpy()
        class lowerCamelCase__ ( A ):
            '''Bart subclass whose call() accepts and drops extra kwargs.'''
            def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any]=None , **UpperCamelCase_ : List[str] ) -> Tuple:
                '''simple docstring'''
                return super().call(UpperCamelCase_ , **UpperCamelCase_ )
        _lowercase : int = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' )
        _lowercase : int = bart_model.generate(UpperCamelCase_ , foo='bar' ).numpy()
        self.assertTrue(np.array_equal(UpperCamelCase_ , UpperCamelCase_ ) )
        class lowerCamelCase__ ( bart_model.model.encoder.__class__ ):
            '''Encoder subclass whose call() accepts **kwargs, so the mixin
            cannot filter unexpected generate() kwargs anymore.'''
            def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : int , **UpperCamelCase_ : Dict ) -> Any:
                '''simple docstring'''
                return super().call(UpperCamelCase_ , **UpperCamelCase_ )
        _lowercase : Union[str, Any] = FakeEncoder(bart_model.config , bart_model.model.shared )
        _lowercase : List[str] = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        _lowercase : Any = bart_model.generate(UpperCamelCase_ ).numpy()
        with self.assertRaises(UpperCamelCase_ ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(UpperCamelCase_ , foo='bar' )
| 4 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __UpperCamelCase ( _lowercase ) -> Tuple:
    '''Build a `SwinaSRConfig` for the Swin2SR checkpoint variant named in the URL.

    NOTE(review): every branch tests `checkpoint_url`, but the parameter is
    named `_lowercase`; each branch also assigns to the throwaway local
    `_lowercase` instead of config attributes, and `return config` reads an
    undefined name. The intended assignment targets (upscale, image size,
    embed dim, depths, upsampler, ...) need to be restored -- confirm against
    the original conversion script.
    '''
    _lowercase : Tuple = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        _lowercase : Optional[Any] = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        _lowercase : Tuple = 4
        _lowercase : Union[str, Any] = 48
        _lowercase : Any = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        _lowercase : Dict = [6, 6, 6, 6]
        _lowercase : Optional[int] = 60
        _lowercase : List[str] = [6, 6, 6, 6]
        _lowercase : Dict = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        _lowercase : str = 4
        _lowercase : str = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        _lowercase : str = 1
        _lowercase : Tuple = 1
        _lowercase : Dict = 126
        _lowercase : Optional[int] = 7
        _lowercase : List[Any] = 2_5_5.0
        _lowercase : Tuple = ''
    return config
def __UpperCamelCase ( name, config ) -> str:
    """Translate one original Swin2SR checkpoint key into the Transformers
    naming scheme.

    Fixes to the original: the signature declared the same placeholder
    parameter twice (a SyntaxError) while the body read ``name``/``config``,
    and every ``replace`` result was stored into a dead local so no rename
    ever took effect; each rename now rebinds ``name``.

    Args:
        name: key from the original checkpoint state dict.
        config: model config; only ``config.upsampler`` is consulted, and
            only for upsampling-head keys.

    Returns:
        The converted key; backbone weights gain a ``swin2sr.`` prefix.
    """
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' )
    if "layers" in name:
        name = name.replace('layers', 'encoder.stages' )
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks', 'layers' )
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn', 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense' )
    if "q_bias" in name:
        name = name.replace('q_bias', 'query.bias' )
    if "k_bias" in name:
        name = name.replace('k_bias', 'key.bias' )
    if "v_bias" in name:
        name = name.replace('v_bias', 'value.bias' )
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp', 'continuous_position_bias_mlp' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'patch_embed.projection' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "conv_first" in name:
        name = name.replace('conv_first', 'first_convolution' )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads: keep their own namespace, no "swin2sr." prefix
        if "conv_last" in name:
            name = name.replace('conv_last', 'final_convolution' )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0', 'conv_before_upsample' )
            if "upsample.0" in name:
                name = name.replace('upsample.0', 'upsample.convolution_0' )
            if "upsample.2" in name:
                name = name.replace('upsample.2', 'upsample.convolution_1' )
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight', 'upsample.conv.weight' )
            name = name.replace('upsample.0.bias', 'upsample.conv.bias' )
        else:
            pass
    else:
        name = 'swin2sr.' + name
    return name
def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]:
    '''Split fused qkv tensors of the original checkpoint into separate
    query/key/value slices and rebuild the state dict.

    NOTE(review): the signature declares `_lowercase` twice, which is a
    SyntaxError; the body reads `orig_state_dict` and `config`, so the
    parameters were presumably `(orig_state_dict, config)` -- confirm against
    the call site. The per-slice assignments below also target the throwaway
    `_lowercase` instead of state-dict entries, so every popped key is
    dropped rather than written back under its converted name.
    '''
    for key in orig_state_dict.copy().keys():
        _lowercase : int = orig_state_dict.pop(_lowercase )
        if "qkv" in key:
            # fused qkv weight/bias: split into three equal chunks of embed_dim
            _lowercase : Tuple = key.split('.' )
            _lowercase : Optional[Any] = int(key_split[1] )
            _lowercase : Any = int(key_split[4] )
            _lowercase : Optional[Any] = config.embed_dim
            if "weight" in key:
                _lowercase : Optional[int] = val[:dim, :]
                _lowercase : int = val[dim : dim * 2, :]
                _lowercase : int = val[-dim:, :]
            else:
                _lowercase : Optional[Any] = val[:dim]
                _lowercase : Tuple = val[dim : dim * 2]
                _lowercase : List[str] = val[-dim:]
            pass
        else:
            _lowercase : List[Any] = val
    return orig_state_dict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]:
    '''Download an original Swin2SR checkpoint, load it into a
    `SwinaSRForImageSuperResolution`, verify the output slice against hard
    coded expectations, and optionally save / push the converted model.

    NOTE(review): the signature repeats `_lowercase` three times (a
    SyntaxError); the body reads `checkpoint_url`, `config`, `model`,
    `processor`, `pytorch_dump_folder_path`, `push_to_hub` and the sibling
    helpers `get_config` / `convert_state_dict` (bound above as
    `__UpperCamelCase`), so the intended parameters were presumably
    `(checkpoint_url, pytorch_dump_folder_path, push_to_hub)`. Stores to the
    throwaway `_lowercase` also need restoring before this can run.
    '''
    _lowercase : Optional[Any] = get_config(_lowercase )
    _lowercase : Union[str, Any] = SwinaSRForImageSuperResolution(_lowercase )
    model.eval()
    _lowercase : List[Any] = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu' )
    _lowercase : Any = convert_state_dict(_lowercase, _lowercase )
    _lowercase , _lowercase : str = model.load_state_dict(_lowercase, strict=_lowercase )
    if len(_lowercase ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(_lowercase ) )
    # buffers (position indices / masks) are the only keys allowed to be unexpected
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f'''Unexpected key {key} in state_dict''' )
    # verify values
    _lowercase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    _lowercase : Any = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' )
    _lowercase : Tuple = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    _lowercase : Tuple = 126 if 'Jpeg' in checkpoint_url else 256
    _lowercase : List[str] = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    _lowercase : Optional[Any] = transforms(_lowercase ).unsqueeze(0 )
    if config.num_channels == 1:
        _lowercase : Any = pixel_values[:, 0, :, :].unsqueeze(1 )
    _lowercase : Optional[int] = model(_lowercase )
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        _lowercase : Any = torch.Size([1, 3, 512, 512] )
        _lowercase : Tuple = torch.tensor(
            [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        _lowercase : Optional[Any] = torch.Size([1, 3, 1024, 1024] )
        _lowercase : int = torch.tensor(
            [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        _lowercase : Optional[int] = torch.Size([1, 3, 1024, 1024] )
        _lowercase : Dict = torch.tensor(
            [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        _lowercase : List[str] = torch.Size([1, 3, 512, 512] )
        _lowercase : int = torch.tensor(
            [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        _lowercase : Any = torch.Size([1, 3, 1024, 1024] )
        _lowercase : Union[str, Any] = torch.tensor(
            [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowercase, atol=1E-3 )
    print('Looks ok!' )
    # canonical Hub repo name for each known checkpoint URL
    _lowercase : List[str] = {
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
            'swin2SR-classical-sr-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
            'swin2SR-classical-sr-x4-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
            'swin2SR-compressed-sr-x4-48'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
            'swin2SR-lightweight-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
            'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
        ),
    }
    _lowercase : int = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(_lowercase )
        print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(_lowercase )
    if push_to_hub:
        model.push_to_hub(f'''caidas/{model_name}''' )
        processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
    # CLI entry point for the Swin2SR conversion.
    # NOTE(review): the parser instance is bound to `_A`, so the `parser`
    # and `args` reads below are undefined names, as is
    # `convert_swinasr_checkpoint` (defined above as `__UpperCamelCase`).
    _A : Dict =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
        type=str,
        help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
    _A : int =parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    # Slow (sentencepiece-backed) tokenizer unavailable in this environment.
    _A : Optional[Any] =None
_A : List[str] =logging.get_logger(__name__)
# Filenames expected in a saved tokenizer directory.
_A : Optional[Any] ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
# Hosted vocabulary / tokenizer files per pretrained checkpoint.
_A : int ={
    '''vocab_file''': {
        '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
        '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
        '''moussaKam/barthez-orangesum-title''': (
            '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
        ),
    },
    '''tokenizer_file''': {
        '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
        '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
        '''moussaKam/barthez-orangesum-title''': (
            '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
        ),
    },
}
# Maximum model input sizes (positional embedding budget) per checkpoint.
_A : Optional[int] ={
    '''moussaKam/mbarthez''': 1_0_2_4,
    '''moussaKam/barthez''': 1_0_2_4,
    '''moussaKam/barthez-orangesum-title''': 1_0_2_4,
}
# Sentencepiece word-boundary marker.
_A : int ='''▁'''
class lowerCamelCase__ ( A ):
    '''"Fast" BARThez tokenizer backed by a tokenizers-library model, with a
    slow sentencepiece class as fallback for vocabulary export.

    NOTE(review): the class attributes below all assign to the same mangled
    name `A_` (each shadows the previous one); the originals were presumably
    distinct attributes (vocab_files_names, pretrained maps, model input
    names, slow_tokenizer_class) -- confirm before relying on them.
    '''
    A_ = VOCAB_FILES_NAMES
    A_ = PRETRAINED_VOCAB_FILES_MAP
    A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A_ = ["""input_ids""", """attention_mask"""]
    A_ = BarthezTokenizer
    def __init__( self : Optional[int] , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : List[Any]="<s>" , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Dict="<s>" , UpperCamelCase_ : List[str]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : Union[str, Any]="<mask>" , **UpperCamelCase_ : int , ) -> str:
        '''Initialize from a sentencepiece vocab file and/or a tokenizers JSON
        file; a string mask token is wrapped as a left-stripping AddedToken.'''
        _lowercase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
        super().__init__(
            UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , )
        _lowercase : Optional[Any] = vocab_file
        # Slow-tokenizer export is only possible when the sentencepiece file exists.
        _lowercase : Optional[Any] = False if not self.vocab_file else True
    def __UpperCAmelCase ( self : int , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> List[int]:
        '''Add special tokens: `<cls> A <sep>` for one sequence, or
        `<cls> A <sep><sep> B <sep>` for a pair.'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _lowercase : Optional[Any] = [self.cls_token_id]
        _lowercase : Dict = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> List[int]:
        '''Token type ids are all zeros for this model, for one sequence or a pair.'''
        _lowercase : List[str] = [self.sep_token_id]
        _lowercase : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def __UpperCAmelCase ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]:
        '''Copy the sentencepiece model into `save_directory`; requires the
        original vocab file (see `__init__`).'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(UpperCamelCase_ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _lowercase : Any = os.path.join(
            UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
            copyfile(self.vocab_file , UpperCamelCase_ )
        return (out_vocab_file,)
| 4 |
'''simple docstring'''
def __UpperCamelCase ( word, max_width ) -> list:
    """Fully justify the sentence *word* into lines of exactly *max_width*
    characters.

    Lines are filled greedily; extra spaces are distributed round-robin from
    the left gap; a single-word line and the last line are left-justified and
    right-padded.

    Fixes to the original: the signature declared the same placeholder
    parameter twice (a SyntaxError) and every store targeted a dead local
    while reads used the intended names (``words``, ``line``, ``width``,
    ``answer``, ...); both are reconstructed from those reads.

    Args:
        word: the sentence to justify (split on whitespace).
        max_width: exact width of every output line.

    Returns:
        List of justified lines.
    """
    words = word.split()

    def justify(line, width, max_width ) -> str:
        # Distribute (max_width - width) spaces across the gaps of `line`.
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only one word in the line,
            # just insert overall_spaces_count for the remainder of the line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word, then the spaces to insert after it
                aligned_words_list.append(line[i] )
                aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without spaces)
            # len(line) = number of gaps needed between words
            line.append(word )
            width += len(word )
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width ) )
            # reset new line and new width
            line, width = [word], len(word )
    # last line is left-justified and padded to max_width
    remaining_spaces = max_width - width - len(line )
    answer.append(' '.join(line ) + (remaining_spaces + 1) * ' ' )
    return answer
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 4 | 1 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_A : Optional[int] =logging.get_logger('''transformers.models.speecht5''')
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Dict:
    '''Copy generator weights from an original HiFi-GAN checkpoint into the
    HF vocoder (weight norm applied during the copy, removed afterwards).

    NOTE(review): `_lowercase` is repeated in the signature (a SyntaxError);
    the body reads `hf_model`, `checkpoint` and `config`, so those were
    presumably the parameters. Every checkpoint tensor below is stored into
    the throwaway `_lowercase`, so nothing actually reaches `hf_model` --
    the intended `hf_model.<param>.data = ...` targets must be restored.
    '''
    hf_model.apply_weight_norm()
    _lowercase : int = checkpoint['input_conv.weight_g']
    _lowercase : Union[str, Any] = checkpoint['input_conv.weight_v']
    _lowercase : int = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates ) ):
        _lowercase : List[str] = checkpoint[f'''upsamples.{i}.1.weight_g''']
        _lowercase : Tuple = checkpoint[f'''upsamples.{i}.1.weight_v''']
        _lowercase : Dict = checkpoint[f'''upsamples.{i}.1.bias''']
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            _lowercase : Optional[Any] = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
            _lowercase : int = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
            _lowercase : str = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
            _lowercase : Optional[int] = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
            _lowercase : int = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
            _lowercase : List[Any] = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
    _lowercase : int = checkpoint['output_conv.1.weight_g']
    _lowercase : List[str] = checkpoint['output_conv.1.weight_v']
    _lowercase : str = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
@torch.no_grad()
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase=None, _lowercase=None, ) -> List[str]:
    '''Convert an original HiFi-GAN vocoder checkpoint plus feature stats into
    a SpeechT5HifiGan model directory, optionally pushing to the Hub.

    NOTE(review): the signature repeats `_lowercase` five times (SyntaxError);
    the body reads `config_path`, `orig_checkpoint`, `stats`, `model` and
    `repo_id`, so the parameters were presumably `(checkpoint_path,
    stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None)`.
    Stores to `_lowercase` and the call to the undefined `load_weights`
    (defined above as `__UpperCamelCase`) also need restoring.
    '''
    if config_path is not None:
        _lowercase : Optional[Any] = SpeechTaHifiGanConfig.from_pretrained(_lowercase )
    else:
        _lowercase : Union[str, Any] = SpeechTaHifiGanConfig()
    _lowercase : Dict = SpeechTaHifiGan(_lowercase )
    _lowercase : Optional[Any] = torch.load(_lowercase )
    load_weights(orig_checkpoint['model']['generator'], _lowercase, _lowercase )
    # stats file holds the per-feature mean (row 0) and scale (row 1)
    _lowercase : Optional[int] = np.load(_lowercase )
    _lowercase : Union[str, Any] = stats[0].reshape(-1 )
    _lowercase : Tuple = stats[1].reshape(-1 )
    _lowercase : Any = torch.from_numpy(_lowercase ).float()
    _lowercase : List[str] = torch.from_numpy(_lowercase ).float()
    model.save_pretrained(_lowercase )
    if repo_id:
        print('Pushing to the hub...' )
        model.push_to_hub(_lowercase )
if __name__ == "__main__":
    # CLI entry point for the HiFi-GAN conversion.
    # NOTE(review): the parser is bound to `_A`, so the `parser`/`args` reads
    # below are undefined, as is `convert_hifigan_checkpoint` (defined above
    # as `__UpperCamelCase`).
    _A : List[Any] =argparse.ArgumentParser()
    parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
    parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
    )
    _A : List[str] =parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 4 |
'''simple docstring'''
import os
from collections.abc import Iterator
def __UpperCamelCase ( _lowercase = "." ) -> Iterator[str]:
    """Yield paths (with leading './' characters stripped) of every ``.py`` /
    ``.ipynb`` file under *_lowercase*, skipping ``__init__.py`` files and
    pruning the ``scripts`` directory plus hidden/private directories.

    Fixes to the original: the pruned directory list was stored into a dead
    local instead of mutating ``dir_names`` in place (so ``os.walk`` never
    pruned anything), and ``splitext``/``join`` were called on the wrong
    variables, which would raise a TypeError.
    """
    for dir_path, dir_names, filenames in os.walk(_lowercase ):
        # mutate in place so os.walk does not descend into pruned directories
        dir_names[:] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename ).lstrip('./' )
def __UpperCamelCase ( i ) -> str:
    """Markdown prefix for nesting depth *i*: a top-level ``##`` heading on a
    new line when ``i`` is 0, otherwise an ``i``-space-indented bullet.

    Fixes to the original: the body reads ``i`` but the parameter was named
    ``_lowercase`` (a NameError), and the return annotation claimed
    ``List[str]`` (unimported, and the function returns a single string).
    """
    return f'''{i * " "}*''' if i else "\n##"
def __UpperCamelCase ( old_path, new_path ) -> str:
    """Print a markdown heading/bullet for each directory component of
    *new_path* that differs from *old_path*, then return *new_path*.

    Fixes to the original: the signature declared the same placeholder
    parameter twice (a SyntaxError), the split parts were stored into a dead
    local while ``old_parts`` was read, and ``md_prefix``/``len`` were passed
    the wrong variable. Still calls the sibling ``md_prefix`` helper by name,
    exactly as the original body did.
    """
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        # emit a heading only for components that changed (and are non-empty)
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(f'''{md_prefix(i )} {new_part.replace("_", " " ).title()}''' )
    return new_path
def __UpperCamelCase ( _lowercase = "." ) -> None:
    '''Walk the tree and print a markdown index of all "good" file paths.

    NOTE(review): this body reads `good_file_paths`, `print_path`,
    `old_path`, `filename` and `url` (the helpers above are actually bound
    to `__UpperCamelCase`, and the stores below all target the throwaway
    `_lowercase`), so it cannot run as written; the intended assignment
    targets must be restored.
    '''
    _lowercase : Dict = ''
    for filepath in sorted(good_file_paths(_lowercase ) ):
        _lowercase , _lowercase : Optional[Any] = os.path.split(_lowercase )
        if filepath != old_path:
            _lowercase : Dict = print_path(_lowercase, _lowercase )
        _lowercase : Optional[int] = (filepath.count(os.sep ) + 1) if filepath else 0
        _lowercase : Dict = f'''{filepath}/(unknown)'''.replace(' ', '%20' )
        _lowercase : Optional[int] = os.path.splitext(filename.replace('_', ' ' ).title() )[0]
        print(f'''{md_prefix(_lowercase )} [(unknown)]({url})''' )
if __name__ == "__main__":
    print_directory_md('''.''')
| 4 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A : int =logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config URL.
_A : List[Any] ={
    '''microsoft/beit-base-patch16-224-pt22k''': (
        '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class lowerCamelCase__ ( A ):
    '''Configuration for BEiT models (backbone plus optional semantic
    segmentation decode/auxiliary heads).

    NOTE(review): every assignment in `__init__` targets the throwaway local
    `_lowercase` instead of a `self.<attr>`, so no configuration value is
    actually stored on the instance; the intended attribute names match the
    parameter names and must be restored.
    '''
    A_ = """beit"""
    def __init__( self : Optional[Any] , UpperCamelCase_ : int=8192 , UpperCamelCase_ : str=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : Dict=3072 , UpperCamelCase_ : Any="gelu" , UpperCamelCase_ : List[Any]=0.0 , UpperCamelCase_ : Dict=0.0 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Optional[Any]=1E-12 , UpperCamelCase_ : List[Any]=224 , UpperCamelCase_ : List[str]=16 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : str=False , UpperCamelCase_ : str=False , UpperCamelCase_ : Dict=False , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : int=[3, 5, 7, 11] , UpperCamelCase_ : Optional[int]=[1, 2, 3, 6] , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Tuple=0.4 , UpperCamelCase_ : Optional[Any]=256 , UpperCamelCase_ : Tuple=1 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : Tuple=255 , **UpperCamelCase_ : Union[str, Any] , ) -> Any:
        '''Store transformer, patch-embedding and segmentation-head settings.'''
        super().__init__(**UpperCamelCase_ )
        _lowercase : Union[str, Any] = vocab_size
        _lowercase : Dict = hidden_size
        _lowercase : Dict = num_hidden_layers
        _lowercase : Any = num_attention_heads
        _lowercase : List[Any] = intermediate_size
        _lowercase : Dict = hidden_act
        _lowercase : str = hidden_dropout_prob
        _lowercase : str = attention_probs_dropout_prob
        _lowercase : int = initializer_range
        _lowercase : Optional[Any] = layer_norm_eps
        _lowercase : Optional[int] = image_size
        _lowercase : Optional[int] = patch_size
        _lowercase : List[Any] = num_channels
        _lowercase : Union[str, Any] = use_mask_token
        _lowercase : int = use_absolute_position_embeddings
        _lowercase : Union[str, Any] = use_relative_position_bias
        _lowercase : List[Any] = use_shared_relative_position_bias
        _lowercase : int = layer_scale_init_value
        _lowercase : Union[str, Any] = drop_path_rate
        _lowercase : List[Any] = use_mean_pooling
        # decode head attributes (semantic segmentation)
        _lowercase : Dict = out_indices
        _lowercase : Dict = pool_scales
        # auxiliary head attributes (semantic segmentation)
        _lowercase : List[str] = use_auxiliary_head
        _lowercase : str = auxiliary_loss_weight
        _lowercase : List[Any] = auxiliary_channels
        _lowercase : Union[str, Any] = auxiliary_num_convs
        _lowercase : int = auxiliary_concat_input
        _lowercase : Union[str, Any] = semantic_loss_ignore_index
class lowerCamelCase__ ( A ):
    '''ONNX export configuration for BEiT.

    NOTE(review): both properties below carry the same (mangled) name, so
    the second definition shadows the first in the class namespace; the
    originals were presumably distinct (`inputs` and `atol_for_validation`).
    '''
    A_ = version.parse("""1.11""" )
    @property
    def __UpperCAmelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
        '''ONNX input spec: pixel_values with named batch/channel/spatial axes.'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def __UpperCAmelCase ( self : Optional[int] ) -> float:
        '''Absolute tolerance used when validating the exported model outputs.'''
        return 1E-4
| 4 |
"""Lazy import scaffolding for the Reformer model.

Fix (restored from obfuscated source): the import-structure dict was bound to
``_A`` while the ``_LazyModule`` call at the bottom reads the undefined name
``_import_structure`` (NameError), and the optional-dependency branches
rebound ``_A`` to bare lists instead of registering the tokenizer/model
submodules. Submodule names are recovered from the TYPE_CHECKING imports.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule name -> public symbols it provides; consumed by _LazyModule below.
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

# The slow (sentencepiece) tokenizer is only exported when sentencepiece is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

# The fast (rust) tokenizer is only exported when `tokenizers` is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

# Modeling code is only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; mirrors _import_structure above.
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; submodules import on first access.
    # `_A` kept for backward compatibility with the obfuscated binding.
    _A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = _A
| 4 | 1 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework used by the tokenizer tests, preferring torch.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
# Fix: the obfuscated source bound the result only to `_A`, but the tests
# below read `FRAMEWORK` (NameError). Keep `_A` as a backward-compatible alias.
_A = FRAMEWORK
class lowerCamelCase__ ( A , unittest.TestCase ):
    """Byte-level (ByT5) tokenizer test suite.

    NOTE(review): this class was mechanically obfuscated and is currently
    broken — every method is named ``__UpperCAmelCase`` (later defs shadow
    earlier ones), the method at ``get_clean_sequence``'s position repeats the
    parameter name ``UpperCamelCase_`` (a SyntaxError), and method bodies read
    names (``tokenizer``, ``toks``, ``max_length`` …) that were renamed away on
    the left-hand sides. The original ``test_*`` method names — including the
    ``pass`` stubs that deliberately disable mixin tests — are not recoverable
    from this chunk, so the code is left byte-identical and only annotated.
    """
    # NOTE(review): both class attributes were obfuscated to `A_`; the second
    # shadows the first. The first is `tokenizer_class` (read via
    # `self.tokenizer_class` below); the second is presumably
    # `test_rust_tokenizer` — confirm upstream.
    A_ = ByTaTokenizer
    A_ = False
    # setUp: creates a fresh tokenizer and saves it to the temp dir.
    # NOTE(review): `tokenizer` on the next-but-one line is undefined — the
    # local was renamed to `_lowercase`.
    def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        '''simple docstring'''
        super().setUp()
        _lowercase : Any = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    # Cached default tokenizer loaded from the hub (read as `self.ta_base_tokenizer`).
    @cached_property
    def __UpperCAmelCase ( self : int ) -> int:
        '''simple docstring'''
        return ByTaTokenizer.from_pretrained('google/byt5-small' )
    # get_tokenizer: reloads the tokenizer saved by setUp, forwarding kwargs.
    def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer:
        '''simple docstring'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
    # get_clean_sequence: builds a decodable (text, ids) pair of bounded length.
    # NOTE(review): duplicated parameter name `UpperCamelCase_` makes this def a
    # SyntaxError; the originals were (tokenizer, with_prefix_space=False,
    # max_length=20, min_length=5), recoverable from the body's reads.
    def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
        '''simple docstring'''
        _lowercase : Dict = []
        for i in range(len(UpperCamelCase_ ) ):
            try:
                _lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        _lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
        _lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
        if max_length is not None and len(UpperCamelCase_ ) > max_length:
            _lowercase : List[Any] = toks[:max_length]
        if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
            while len(UpperCamelCase_ ) < min_length:
                _lowercase : Tuple = toks + toks
        # toks_str = [t[1] for t in toks]
        _lowercase : Dict = [t[0] for t in toks]
        # Ensure consistency
        _lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
        if " " not in output_txt and len(UpperCamelCase_ ) > 1:
            _lowercase : Any = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
            )
        if with_prefix_space:
            _lowercase : Union[str, Any] = ' ' + output_txt
        _lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
        return output_txt, output_ids
    # Test: explicit '</s>' in the input matches the auto-appended EOS.
    def __UpperCAmelCase ( self : Optional[int] ) -> int:
        '''simple docstring'''
        _lowercase : List[str] = self.ta_base_tokenizer
        _lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
        _lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
        self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
    # Test: multi-byte (non-ASCII) characters round-trip through encode/decode.
    def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        '''simple docstring'''
        _lowercase : Optional[int] = self.ta_base_tokenizer
        _lowercase : Tuple = 'Unicode €.'
        _lowercase : List[Any] = tokenizer(UpperCamelCase_ )
        _lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
        # decoding
        _lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
        _lowercase : Any = tokenizer('e è é ê ë' )
        _lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
        # decoding
        _lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
    # Test: padded batch returns framework tensors with the pinned ids/shape.
    def __UpperCAmelCase ( self : Tuple ) -> List[str]:
        '''simple docstring'''
        _lowercase : List[Any] = self.ta_base_tokenizer
        _lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        _lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        _lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
        self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
        if FRAMEWORK != "jax":
            _lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
        else:
            _lowercase : List[str] = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
        self.assertEqual((2, 37) , batch.input_ids.shape )
        self.assertEqual((2, 37) , batch.attention_mask.shape )
    # Test: encoder-only call yields input_ids/attention_mask but no decoder keys.
    def __UpperCAmelCase ( self : Optional[int] ) -> str:
        '''simple docstring'''
        _lowercase : Union[str, Any] = self.ta_base_tokenizer
        _lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        _lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids' , UpperCamelCase_ )
        self.assertIn('attention_mask' , UpperCamelCase_ )
        self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
        self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
    # Test: text_target with max_length padding produces fixed-width labels.
    def __UpperCAmelCase ( self : Any ) -> int:
        '''simple docstring'''
        _lowercase : Tuple = self.ta_base_tokenizer
        _lowercase : Optional[Any] = [
            'Summary of the text.',
            'Another summary.',
        ]
        _lowercase : str = tokenizer(
            text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
        self.assertEqual(32 , targets['input_ids'].shape[1] )
    # Test: a literal ' </s>' in source/target encodes to the pinned id lists.
    def __UpperCAmelCase ( self : Dict ) -> Tuple:
        '''simple docstring'''
        _lowercase : str = self.ta_base_tokenizer
        _lowercase : str = ['A long paragraph for summarization. </s>']
        _lowercase : Optional[int] = ['Summary of the text. </s>']
        # fmt: off
        _lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        _lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        _lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
        self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
    # Test: save/reload round-trips encodings, added tokens, and model_max_length.
    def __UpperCAmelCase ( self : List[str] ) -> int:
        '''simple docstring'''
        _lowercase : Dict = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        _lowercase : List[Any] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                _lowercase : List[Any] = tempfile.mkdtemp()
                _lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
                _lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
                tokenizer.save_pretrained(UpperCamelCase_ )
                _lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
                _lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
                self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
                shutil.rmtree(UpperCamelCase_ )
        _lowercase : str = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                _lowercase : Dict = tempfile.mkdtemp()
                _lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'] )
                _lowercase : Optional[int] = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token' )
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
                _lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
                tokenizer.save_pretrained(UpperCamelCase_ )
                _lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
                _lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
                self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
                self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                _lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(UpperCamelCase_ )
    # Test: additional_special_tokens in the saved config files survive reload
    # and can be overridden via from_pretrained(additional_special_tokens=...).
    def __UpperCAmelCase ( self : List[str] ) -> Tuple:
        '''simple docstring'''
        _lowercase : List[Any] = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(UpperCamelCase_ )
                with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
                    _lowercase : int = json.load(UpperCamelCase_ )
                with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
                    _lowercase : Tuple = json.load(UpperCamelCase_ )
                _lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
                _lowercase : Any = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                _lowercase : int = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(UpperCamelCase_ , UpperCamelCase_ )
                with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(UpperCamelCase_ , UpperCamelCase_ )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                _lowercase : Optional[Any] = tokenizer_class.from_pretrained(
                    UpperCamelCase_ , )
                self.assertIn(
                    'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                _lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
                _lowercase : Tuple = tokenizer_class.from_pretrained(
                    UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
                self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
    # Test: decoding a single out-of-range byte id yields the empty string.
    def __UpperCAmelCase ( self : List[str] ) -> str:
        '''simple docstring'''
        _lowercase : Union[str, Any] = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(UpperCamelCase_ )
                _lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
                self.assertTrue(tokenizer.decode([255] ) == '' )
    # NOTE(review): the next four stubs deliberately disable inherited mixin
    # tests; their original names (which must match the mixin's method names
    # exactly) are not recoverable from this chunk.
    def __UpperCAmelCase ( self : Optional[int] ) -> int:
        '''simple docstring'''
        pass
    def __UpperCAmelCase ( self : str ) -> Tuple:
        '''simple docstring'''
        pass
    def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
        '''simple docstring'''
        pass
    def __UpperCAmelCase ( self : List[Any] ) -> int:
        '''simple docstring'''
        pass
    # Test: convert_tokens_to_string returns a str for a byte-token sequence.
    def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
        '''simple docstring'''
        _lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                _lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
                _lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
                self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
    # Test: special-token id setters keep token/id attribute pairs consistent.
    def __UpperCAmelCase ( self : List[Any] ) -> str:
        '''simple docstring'''
        _lowercase : Dict = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                _lowercase : Optional[int] = [
                    'bos_token',
                    'eos_token',
                    'unk_token',
                    'sep_token',
                    'pad_token',
                    'cls_token',
                    'mask_token',
                ]
                _lowercase : Optional[int] = 0
                _lowercase : int = tokenizer.convert_ids_to_tokens(
                    UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
                for attr in attributes_list:
                    setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
                    self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
                    self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
                    setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
                    self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
                    self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
                setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] )
                self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] )
                self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] )
                setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
                self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
                self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 4 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds small random RoFormer configs and inputs for the Flax model tests.

    Restored from obfuscated source: the class name is recovered from its
    instantiation site below (``FlaxRoFormerModelTester(self)``), constructor
    parameters from the right-hand sides of the attribute assignments, and
    ``prepare_config_and_inputs`` from its call site.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) with random data."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    # NOTE(review): this name follows the FlaxModelTesterMixin convention;
    # it is not referenced inside this chunk — confirm upstream.
    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() into the (config, inputs_dict) shape the mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class lowerCamelCase__ ( A , unittest.TestCase ):
    """Model-level tests for Flax RoFormer (mixin-driven plus a slow hub-load check).

    Fixes restored from obfuscated source: the two class attributes shared the
    name ``A_`` (the second shadowed the first, so ``self.all_model_classes``
    read below was never defined), the tester instance was discarded instead of
    being stored on ``self``, and ``from_pt`` was passed an undefined name.
    """

    # NOTE(review): original name of this flag is not recoverable from this
    # chunk (presumably `test_head_masking`) — confirm upstream.
    A_ = True
    # Name grounded by the `self.all_model_classes` read in the slow test below.
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    # NOTE(review): `setUp` name restored by unittest convention.
    def setUp(self):
        """Create the shared model tester consumed by the mixin's tests."""
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Each model class loads the pretrained checkpoint and runs a forward pass."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('junnyu/roformer_chinese_small', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
    """Integration test pinning real logits of the pretrained Chinese RoFormer.

    Fix restored from obfuscated source: the method body read undefined names
    (``UpperCamelCase_``, ``vocab_size`` bound to a throwaway local); locals are
    recovered from their later uses.
    """

    # NOTE(review): `test_*` name restored so unittest discovers it; the exact
    # original name is not recoverable from this chunk — confirm upstream.
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 5_0000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # Pinned reference slice computed from the released checkpoint.
        expected_slice = jnp.array(
            [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
_A : Optional[Any] =TypeVar('''T''')
def get_parent_position(position: int) -> int:
    """Return the heap index of the parent of the node at ``position``.

    Fix: restored the name used by every call site (the obfuscated
    ``__UpperCamelCase`` was shadowed by later defs) and the parameter name
    the body actually reads.
    """
    return (position - 1) // 2
def get_child_left_position(position: int) -> int:
    """Return the heap index of the left child of the node at ``position``.

    Fix: restored the call-site name and the parameter name the body reads.
    """
    return (2 * position) + 1
def get_child_right_position(position: int) -> int:
    """Return the heap index of the right child of the node at ``position``.

    Fix: restored the call-site name and the parameter name the body reads.
    """
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Array-backed min-heap with an element->index map for O(log n) update_key.

    Restored from obfuscated source: the class name is recovered from its use
    in the Prim's function below, method names from their call sites, and
    attribute names from their later reads (`self.heap`, `self.position_map`,
    `self.elements`).
    """

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []       # (element, weight) pairs, heap-ordered by weight
        self.position_map: dict[T, int] = {}      # element -> its index in self.heap
        self.elements: int = 0                    # number of live entries

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        """Insert ``elem`` with priority ``weight``."""
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        """Remove and return the element with the smallest weight."""
        if self.elements > 1:
            # Move the root to the end so popping it is O(1).
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            # Restore the heap property from the new root.
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        """Change ``elem``'s priority to ``weight`` and re-heapify locally."""
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        """Recursively swap ``elem`` with its parent while it is lighter."""
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        """Recursively swap ``elem`` with its lighter child while it is heavier."""
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            # Prefer the right child only when it is strictly the lightest.
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, nodea_pos: int, nodeb_pos: int) -> None:
        """Swap two heap slots and keep position_map consistent."""
        nodea_elem = self.heap[nodea_pos][0]
        nodeb_elem = self.heap[nodeb_pos][0]
        self.heap[nodea_pos], self.heap[nodeb_pos] = (
            self.heap[nodeb_pos],
            self.heap[nodea_pos],
        )
        self.position_map[nodea_elem] = nodeb_pos
        self.position_map[nodeb_elem] = nodea_pos
class lowerCamelCase__ ( Generic[T] ):
    """Undirected weighted graph stored as an adjacency map.

    Restored from obfuscated source: attribute names are recovered from their
    later reads (`self.connections`, `self.nodes`) and `add_node` from its call
    sites; the obfuscated code never set the attributes on ``self`` at all
    (AttributeError on first use).
    """

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}  # node -> {neighbour: weight}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        """Register ``node`` if it is not already present."""
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    # NOTE(review): `add_edge` name is conventional (not referenced in this
    # chunk) — confirm upstream.
    def add_edge(self, nodea: T, nodea_: T, weight: int) -> None:
        """Add an undirected edge of the given weight between the two nodes."""
        self.add_node(nodea)
        self.add_node(nodea_)
        self.connections[nodea][nodea_] = weight
        self.connections[nodea_][nodea] = weight
def prims_algo(graph) -> tuple[dict[T, int], dict[T, T | None]]:
    """Run Prim's MST algorithm and return (dist, parent) maps.

    Restored from obfuscated source: the parameter had been renamed away from
    ``graph`` (which the body reads) and the loop locals away from ``node`` /
    ``dist[...]`` / ``parent[...]``.

    ``dist[v]`` is the weight of the edge connecting ``v`` to the tree;
    ``parent[v]`` is the tree neighbour it connects through (None for the root).
    """
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization: the first extracted node becomes the tree root
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent


# Backward-compatible alias: in the obfuscated source this function was the
# final binding of the shared name `__UpperCamelCase`.
__UpperCamelCase = prims_algo
| 4 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_A : Optional[int] =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""input_features""", """is_longer"""]
def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Tuple = top_db
_lowercase : Any = truncation
_lowercase : str = padding
_lowercase : int = fft_window_size
_lowercase : Any = (fft_window_size >> 1) + 1
_lowercase : int = hop_length
_lowercase : Any = max_length_s
_lowercase : str = max_length_s * sampling_rate
_lowercase : Any = sampling_rate
_lowercase : List[Any] = frequency_min
_lowercase : Tuple = frequency_max
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , )
_lowercase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]:
'''simple docstring'''
_lowercase : Tuple = copy.deepcopy(self.__dict__ )
_lowercase : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
_lowercase : List[str] = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , )
return log_mel_spectrogram.T
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : Union[str, Any] = [0]
# randomly choose index for each part
_lowercase : Tuple = np.random.choice(ranges[0] )
_lowercase : int = np.random.choice(ranges[1] )
_lowercase : Any = np.random.choice(ranges[2] )
_lowercase : int = mel[idx_front : idx_front + chunk_frames, :]
_lowercase : int = mel[idx_middle : idx_middle + chunk_frames, :]
_lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :]
_lowercase : List[Any] = torch.tensor(mel[None, None, :] )
_lowercase : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ )
_lowercase : str = mel_shrink[0][0].numpy()
_lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowercase : Tuple = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowercase : Any = len(UpperCamelCase_ ) - max_length
_lowercase : Dict = np.random.randint(0 , overflow + 1 )
_lowercase : Optional[int] = waveform[idx : idx + max_length]
_lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_lowercase : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
_lowercase : List[Any] = False
else:
_lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : int = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
_lowercase : Any = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) )
_lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature:
'''simple docstring'''
_lowercase : Dict = truncation if truncation is not None else self.truncation
_lowercase : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : List[str] = is_batched_numpy or (
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
_lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa )
elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : int = [np.asarray(UpperCamelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
_lowercase : Optional[Any] = [
self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ )
for waveform in raw_speech
]
_lowercase : List[Any] = []
_lowercase : Dict = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase_ )
is_longer.append(UpperCamelCase_ )
if truncation == "fusion" and sum(UpperCamelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) )
_lowercase : str = True
if isinstance(input_mel[0] , UpperCamelCase_ ):
_lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_lowercase : Tuple = [[longer] for longer in is_longer]
_lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer}
_lowercase : Optional[int] = BatchFeature(UpperCamelCase_ )
if return_tensors is not None:
_lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ )
return input_features
| 4 | 1 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Return the entropy of softmax(x) along dim 1 for a batch of logits.

    With A = sum_i exp(x_i) and B = sum_i x_i * exp(x_i), the softmax entropy
    is log(A) - B / A.  Renamed from the mangled `__UpperCamelCase` to match
    the call sites (`entropy(...)`) in this file.
    """
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    """BERT encoder with DeeBERT "highway" early exits.

    After every layer a `BertHighway` head produces (logits, pooled_output);
    at inference time, if the highway entropy falls below that layer's
    threshold, a `HighwayException` carrying the partial outputs is raised to
    exit early.  Class/method names restored from the call sites
    (`DeeBertEncoder(...)`, `self.encoder.init_highway_pooler(...)`).
    """

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        # -1 disables early exit (entropy can never be below it)
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        """Set a single threshold for all layers, or a per-layer list."""
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        """Copy the main model pooler's weights into every highway pooler."""
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    """BERT model whose encoder is a `DeeBertEncoder` with highway exits.

    Decorator/base-class names restored from this file's imports
    (`BERT_START_DOCSTRING`, `BertPreTrainedModel`); the mangled source
    referenced an undefined `A`.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        """Initialize every highway pooler from the main pooler's weights."""
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prune attention heads; `heads_to_prune` maps layer -> list of heads."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    """Control-flow exception raised by the encoder when a highway exit fires.

    Carries the partial model outputs in `message` and the 1-based index of
    the exiting layer in `exit_layer`.  The mangled original inherited from an
    undefined `A` and never called the `Exception` initializer.
    """

    def __init__(self, message, exit_layer):
        # Initialize Exception so args / str(e) behave normally.
        super().__init__(message)
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A highway (early-exit) classification head for one encoder layer.

    Pools the layer's hidden states with a `BertPooler`, applies dropout and a
    linear classifier, and returns `(logits, pooled_output)`.
    """

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    """DeeBERT sequence classifier with per-layer highway losses.

    `forward` catches the encoder's `HighwayException` to support early exit
    at inference, and when `labels` are given also computes one loss per
    highway head (used when `train_highway=True`).
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An early highway exit fired: the exception carries the outputs.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 4 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    """Fetch a single story's JSON record from the Hacker News API.

    Renamed from the mangled `__UpperCamelCase` to match the call site in
    `hackernews_top_stories`.  A timeout is supplied so that a stalled
    connection cannot hang the caller forever.
    """
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url, timeout=10).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Return the first `max_stories` top stories from Hacker News.

    The mangled original bound the id list to `_lowercase` and then iterated
    an undefined `story_ids`; names restored so the function actually runs.
    """
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url, timeout=10).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top `max_stories` Hacker News stories as a Markdown list.

    Each story dict must contain at least `title` and `url` keys.
    """
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
# Script entry point: print the top Hacker News stories as Markdown.
# NOTE(review): this reads a module-level `hackernews_top_stories_as_markdown`;
# the defs above are currently mangled to `__UpperCamelCase`, so this call
# fails with NameError until they are renamed.
if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """Project Euler 85: area of the grid whose rectangle count is closest to `target`.

    An a x b grid contains T(a) * T(b) rectangles, where T(n) = n(n+1)/2 is
    the n-th triangle number.  For each candidate width `idx_a` we estimate the
    matching height b from the quadratic formula and test floor(b) and ceil(b).
    Renamed from the mangled `__UpperCamelCase` to match the `solution()` call
    in the main guard; locals restored from the surviving reads.
    """
    # triangle_numbers[n] == T(n); index 0 holds T(0) = 0.
    triangle_numbers: list[int] = [0]
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        # an estimate of b, using the quadratic formula
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
# Script entry point: print the Project Euler 085 answer for the default target.
# NOTE(review): this reads a module-level `solution`; the def above is
# currently mangled to `__UpperCamelCase`, so this fails until it is renamed.
if __name__ == "__main__":
    print(F'''{solution() = }''')
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict =logging.get_logger(__name__)
_A : Dict ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    """Configuration for MEGATRON-BERT models.

    Defaults are taken from the mangled signature (vocab 29056, hidden 1024,
    24 layers, 16 heads).  The original declared every parameter with the same
    mangled name (a SyntaxError) and bound values to locals instead of `self`;
    both are repaired here, and the base class is `PretrainedConfig` as per
    this file's import.
    """

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 4 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class GPTBigCodeConfig(PretrainedConfig):
    """Configuration for GPTBigCode models (e.g. bigcode/gpt_bigcode-santacoder).

    Parameter names restored from the attribute reads in the mangled body
    (`n_positions`, `attn_pdrop`, `multi_query`, ...); the `fpaa` suffix in the
    mangled names is the obfuscated `fp32`.  The original declared every
    parameter with the same name (a SyntaxError) and bound values to locals
    instead of `self`.
    """

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 4 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """Convert a fine-pruned checkpoint into a standard ("bertarized") one.

    Loads `pytorch_model.bin`, materializes each pruning method's mask into
    the weights, and saves the result next to (or at) `args.target_model_path`.
    Renamed from the mangled `__UpperCamelCase` to match the `main(args)` call
    in the main guard; locals restored from the surviving reads.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # These layers are never pruned: copy them verbatim.
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]  # strip "weight"
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch parameters of L0 regularization.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    # CLI entry point.  The mangled original created the parser as `_A` but
    # then called methods on an undefined `parser` (and `main(args)` on an
    # undefined `args`) — names restored here.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()

    main(args)
| 4 | 1 |
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)

# Module constants consumed by the metric class below and by the
# `add_start_docstrings` decorator.  The mangled original bound every one of
# them to `_A` (so later reads of `logger`, `_DESCRIPTION`, `CHECKPOINT_URLS`,
# ... failed); the names are restored from those reads.
_CITATION = '''\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
'''

_DESCRIPTION = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).

See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''

_KWARGS_DESCRIPTION = '''
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    \'scores\': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
'''

# Download locations for every published BLEURT checkpoint, keyed by name.
CHECKPOINT_URLS = {
    '''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
    '''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
    '''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
    '''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
    '''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
    '''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
    '''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
    '''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
    '''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
    '''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    """`datasets` metric wrapping the reference BLEURT scorer.

    Method names restored to the `datasets.Metric` hooks (`_info`,
    `_download_and_prepare`, `_compute`) that the library calls; the mangled
    defs all shared one name and would have shadowed each other.
    """

    def _info(self):
        # Static metadata consumed by the `datasets` library.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # Resolve which checkpoint to use from the metric's config_name.
        # NOTE(review): the second branch must be `elif` — with a plain `if`
        # (as in the mangled source) the "default" choice would fall through
        # to the KeyError below.
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 4 |
'''simple docstring'''
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(_lowercase, _lowercase ):
_lowercase : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
_lowercase : int = ''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
_lowercase : Dict = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
_lowercase : Optional[Any] = B'=' * ((6 - len(_lowercase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
_lowercase : Optional[int] = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
for index in range(0, len(_lowercase ), 6 ) ).encode()
+ padding
)
def __UpperCamelCase ( _lowercase ) -> bytes:
    """Base64-decode a string or bytes-like object (mirrors ``base64.b64decode``).

    Raises:
        TypeError: if the input is neither ``bytes`` nor ``str``.
        ValueError: if a bytes input contains non-ASCII characters.
        AssertionError: on invalid Base64 characters or incorrect padding.
    """
    # Charset defined locally so this function is self-contained.
    b64_charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
    encoded_data = _lowercase
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            'argument should be a bytes-like object or ASCII string, '
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode('utf-8')
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters')
    padding = encoded_data.count('=')
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in b64_charset for char in encoded_data[:-padding]), "Invalid base64 character(s) found."
    else:
        assert all(
            char in b64_charset for char in encoded_data), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        # Each character contributes 6 bits; drop the 2 filler bits per '='.
        binary_stream = ''.join(
            bin(b64_charset.index(char))[2:].zfill(6) for char in encoded_data)[: -padding * 2]
    else:
        binary_stream = ''.join(
            bin(b64_charset.index(char))[2:].zfill(6) for char in encoded_data)
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)


if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Lazy-import structure: the configuration is always exposed; the modeling
# module is only registered when torch is available.
_import_structure = {
    'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
# Backward-compatible alias for the obfuscated module-level name `_A`.
_A : int = _import_structure

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): the obfuscated original rebound `_A` to this list instead
    # of registering it under the 'modeling_falcon' key, and then passed an
    # undefined `_import_structure` to `_LazyModule`; both are fixed here.
    _import_structure['modeling_falcon'] = [
        'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FalconForCausalLM',
        'FalconModel',
        'FalconPreTrainedModel',
        'FalconForSequenceClassification',
        'FalconForTokenClassification',
        'FalconForQuestionAnswering',
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> bool:
    """Return True when the decimal representation of the value reads the same forwards and backwards."""
    digits = str(_lowercase)
    return digits == digits[::-1]
def __UpperCamelCase ( _lowercase ) -> int:
    """Return the value plus its digit-reversed counterpart."""
    reversed_digits = str(_lowercase)[::-1]
    return int(_lowercase) + int(reversed_digits)
def __UpperCamelCase ( _lowercase = 1_0000 ) -> int:
    """Project Euler 55: count Lychrel candidates below *_lowercase*.

    A number is a Lychrel candidate when repeatedly adding it to its digit
    reversal never yields a palindrome within 50 iterations.

    NOTE(review): the original called the undefined names `sum_reverse` and
    `is_palindrome`; the helpers are now defined locally so the function is
    self-contained.
    """

    def _is_palindrome(value: int) -> bool:
        # Palindrome test on the decimal representation.
        text = str(value)
        return text == text[::-1]

    def _sum_reverse(value: int) -> int:
        # Value plus its digit-reversed counterpart.
        return value + int(str(value)[::-1])

    lychrel_nums = []
    for num in range(1, _lowercase):
        iterations = 0
        current = num
        while iterations < 50:
            current = _sum_reverse(current)
            iterations += 1
            if _is_palindrome(current):
                break
        else:
            # No palindrome reached within 50 iterations: Lychrel candidate.
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    # NOTE(review): the original guard referenced the undefined name `solution`.
    print(F'''{__UpperCamelCase() = }''')
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def _analyze_text(text: str) -> tuple[dict, dict]:
    """Count single-character and adjacent-pair frequencies of *text* (local helper)."""
    single_char_strings: Counter = Counter()
    two_char_strings: Counter = Counter()
    # The final character is not visited by the pair loop below.
    single_char_strings[text[-1]] += 1
    # First case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def __UpperCamelCase ( _lowercase ) -> None:
    """Print the first-order and second-order Shannon entropies of the text
    (rounded, one per line) followed by their difference.

    NOTE(review): the original called the undefined name `analyze_text` and
    reused a single loop variable for both pair-loop positions, so only
    doubled characters were ever examined; both defects are fixed.
    """
    single_char_strings, two_char_strings = _analyze_text(_lowercase)
    my_alphas = list(' ' + ascii_lowercase)
    # What is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # One length string.
    my_fir_sum = 0
    # For each alpha we go in our dict and if it is in it we calculate entropy.
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # Print entropy.
    print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # Two len string.
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # For each alpha pair (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # Print second entropy.
    print(f'''{round(-1 * my_sec_sum ):.1f}''' )
    # Print the difference between them.
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
    """Return (single-character counts, adjacent-pair counts) for the text.

    NOTE(review): the original bound both counters to `_lowercase` and then
    referenced the undefined names `single_char_strings`, `two_char_strings`
    and `text`; the intended bindings are restored.
    """
    text = _lowercase
    single_char_strings: Counter = Counter()
    two_char_strings: Counter = Counter()
    # The final character is not visited by the pair loop below.
    single_char_strings[text[-1]] += 1
    # First case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> List[Any]:
    """Module entry point: run this file's doctests."""
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    # NOTE(review): the original guard called the undefined name `main`;
    # call the entry point defined above instead.
    __UpperCamelCase()
| 4 |
'''simple docstring'''
import argparse
from collections import defaultdict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int:
_lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowercase, 'r' ) as f:
_lowercase : Optional[int] = f.readlines()
_lowercase : Dict = f'''class {class_name}('''
_lowercase : List[Any] = f'''{4 * " "}def {test_name}('''
_lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}'''
_lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}'''
_lowercase : Dict = False
_lowercase : str = False
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = 0
_lowercase : Tuple = 0
_lowercase : Optional[int] = []
for line in lines:
if line.startswith(_lowercase ):
_lowercase : int = True
elif in_class and line.startswith(_lowercase ):
_lowercase : List[Any] = True
elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
_lowercase : str = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : List[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * " "}{correct_line}''' )
_lowercase : Any = False
else:
new_lines.append(_lowercase )
with open(_lowercase, 'w' ) as f:
for line in new_lines:
f.write(_lowercase )
def __UpperCamelCase ( _lowercase, _lowercase=None ) -> Optional[Any]:
if fail is not None:
with open(_lowercase, 'r' ) as f:
_lowercase : Any = {l.strip() for l in f.readlines()}
else:
_lowercase : str = None
with open(_lowercase, 'r' ) as f:
_lowercase : str = f.readlines()
_lowercase : Union[str, Any] = defaultdict(_lowercase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
if __name__ == "__main__":
_A : str =argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
_A : Union[str, Any] =parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 4 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class lowerCamelCase__ ( TaskTemplate ):
    """Summarization task template: maps a text column to a summary column.

    NOTE(review): the obfuscated original assigned every field to the same
    name `A_` (clobbering all but the last), froze on the undefined name `A`,
    and its property read undefined attributes. The canonical field names
    from the upstream `datasets` library are restored here.
    """

    # Task identifier; serialized even when left at the default.
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    # Expected input/output dataset schemas.
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    # Names of the dataset columns holding the text and its summary.
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        # Consumers use this mapping to rename dataset columns.
        return {self.text_column: "text", self.summary_column: "summary"}
| 4 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCamelCase__ ( Pipeline ):
    """Zero-shot image-classification pipeline: scores an image against a set
    of candidate text labels with a dual-encoder model (e.g. CLIP).

    NOTE(review): the obfuscated original had duplicate parameter names
    (SyntaxErrors), four methods all named `__UpperCAmelCase`, and an
    undefined base class `A`; the canonical Pipeline API is restored here,
    reconstructed from the upstream transformers implementation.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        """Classify the image(s) against `candidate_labels` passed in kwargs."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Route user kwargs to the preprocess step; forward/postprocess take none.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs['candidate_labels'] = candidate_labels
        # One hypothesis sentence per candidate label.
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs['text_inputs'] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            # A single label produces a scalar; normalize to a list.
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''')
        # Highest score first.
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 4 | 1 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_A : List[Any] =logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCamelCase__ ( Pipeline ):
    """Image-to-text pipeline: generates a caption (optionally conditioned on
    a text prompt) for an image with a vision-to-sequence model.

    NOTE(review): the obfuscated original had duplicate parameter names
    (SyntaxErrors), methods all named `__UpperCAmelCase`, and an undefined
    base class `A`; the canonical Pipeline API is restored here, reconstructed
    from the upstream transformers implementation.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params['prompt'] = prompt
        if generate_kwargs is not None:
            forward_kwargs['generate_kwargs'] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs['generate_kwargs'] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
                    ' please use only one' )
            forward_kwargs['generate_kwargs']['max_new_tokens'] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """Generate text for the given image(s)."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    F'''Received an invalid text input, got - {type(prompt)} - but expected a single string. '''
                    'Note also that one single text can be provided for conditional image to text generation.' )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                # GIT expects the CLS token prepended to the prompt ids.
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({'input_ids': input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(F'''Model type {model_type} does not support conditional text generation''')
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs['input_ids'] = None
        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `input_ids = None` in `preprocess` (when `prompt=None`). In batch mode the
        # pipeline groups them into a list of `None`, which would fail `generate`; collapse it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['input_ids'], list)
            and all(x is None for x in model_inputs['input_ids'])
        ):
            model_inputs['input_ids'] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                'generated_text': self.tokenizer.decode(
                    output_ids, skip_special_tokens=True, )
            }
            records.append(record)
        return records
| 4 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def _analyze_text(text: str) -> tuple[dict, dict]:
    """Count single-character and adjacent-pair frequencies of *text* (local helper)."""
    single_char_strings: Counter = Counter()
    two_char_strings: Counter = Counter()
    # The final character is not visited by the pair loop below.
    single_char_strings[text[-1]] += 1
    # First case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def __UpperCamelCase ( _lowercase ) -> None:
    """Print the first-order and second-order Shannon entropies of the text
    (rounded, one per line) followed by their difference.

    NOTE(review): the original called the undefined name `analyze_text` and
    reused a single loop variable for both pair-loop positions, so only
    doubled characters were ever examined; both defects are fixed.
    """
    single_char_strings, two_char_strings = _analyze_text(_lowercase)
    my_alphas = list(' ' + ascii_lowercase)
    # What is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # One length string.
    my_fir_sum = 0
    # For each alpha we go in our dict and if it is in it we calculate entropy.
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # Print entropy.
    print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # Two len string.
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # For each alpha pair (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # Print second entropy.
    print(f'''{round(-1 * my_sec_sum ):.1f}''' )
    # Print the difference between them.
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
    """Return (single-character counts, adjacent-pair counts) for the text.

    NOTE(review): the original bound both counters to `_lowercase` and then
    referenced the undefined names `single_char_strings`, `two_char_strings`
    and `text`; the intended bindings are restored.
    """
    text = _lowercase
    single_char_strings: Counter = Counter()
    two_char_strings: Counter = Counter()
    # The final character is not visited by the pair loop below.
    single_char_strings[text[-1]] += 1
    # First case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> List[Any]:
    """Module entry point: run this file's doctests."""
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    # NOTE(review): the original guard called the undefined name `main`;
    # call the entry point defined above instead.
    __UpperCamelCase()
| 4 | 1 |
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __UpperCamelCase(xlm_checkpoint_path, pytorch_dump_folder_path) -> Tuple:
    """Convert an original XLM checkpoint into HF-format weight/config/vocab files.

    NOTE(review): the obfuscated original declared two parameters both named
    `_lowercase` (a SyntaxError) and dropped the target dict keys when
    re-nesting the state dict; both are restored from the upstream converter.
    """
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location='cpu')
    state_dict = chkpt['model']
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict['transformer.' + k] = v
    config = chkpt['params']
    # Drop tensor-valued params; only JSON-serializable entries are kept.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt['dico_word2id']
    # BPE continuation markers ('@@') are stripped; word-final tokens get '</w>'.
    vocab = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@', '' ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path, 'w', encoding='utf-8' ) as f:
        f.write(json.dumps(config, indent=2 ) + '\n' )
    # NOTE(review): the original printed the config path here by mistake.
    print(f'''Save vocab file to {pytorch_vocab_dump_path}''' )
    with open(pytorch_vocab_dump_path, 'w', encoding='utf-8' ) as f:
        f.write(json.dumps(vocab, indent=2 ) + '\n' )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    __UpperCamelCase(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 4 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow integration test for the Flax XLM-RoBERTa base model.

    NOTE(review): the obfuscated original named the test `__UpperCAmelCase`,
    so unittest never discovered it, and lost every local binding; the
    canonical test is restored from the upstream transformers test.
    """

    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 4 | 1 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def __UpperCamelCase ( ) -> None:
    """Check Prim's MST on a fixed 9-node graph against the known result.

    NOTE(review): the obfuscated original unpacked edges into a duplicate
    loop variable and lost the `edges`/`adjancency`/`result` bindings; the
    intended names are restored.
    """
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    # Build an undirected adjacency list.
    adjancency = defaultdict(list)
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost])
        adjancency[nodeb].append([nodea, cost])
    result = mst(adjancency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # Each expected edge must appear in the result in either orientation.
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 4 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
# Backward-compatible alias for the obfuscated module-level name `_A`
# (the original rebound `_A` from the logger to this map, losing the logger).
_A : Union[str, Any] = INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP


class InstructBlipVisionConfig(PretrainedConfig):
    """Configuration for the InstructBLIP vision encoder.

    NOTE(review): the obfuscated original declared every `__init__` parameter
    with the same name (a SyntaxError); parameter and class names are
    restored from the upstream transformers implementation.
    """

    model_type = """instructblip_vision_model"""

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1E-6,
        attention_dropout=0.0,
        initializer_range=1E-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    """Configuration for the InstructBLIP Q-Former (BERT-like cross-attention module)."""

    model_type = """instructblip_qformer"""

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    """Composite configuration tying vision, Q-Former and language-model configs together."""

    model_type = """instructblip"""
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends into the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: "InstructBlipVisionConfig",
        qformer_config: "InstructBlipQFormerConfig",
        text_config: "PretrainedConfig",
        **kwargs,
    ):
        """Alternate constructor from already-built sub-configs."""
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output


# Preserve the module-level binding left by the obfuscated original.
lowerCamelCase__ = InstructBlipConfig
| 4 | 1 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( A , unittest.TestCase ):
    """Test suite for ``BloomTokenizerFast``.

    NOTE(review): identifiers in this block are mangled — every ``A_``
    assignment rebinds the same class attribute, all methods share the
    name ``__UpperCAmelCase`` (so earlier definitions are shadowed and
    none is collected as a ``test_*`` method by unittest), and locals
    bound to ``_lowercase`` are read back under their upstream names
    (``tokenizer``, ``kwargs`` …).  TODO: restore the upstream names.
    """
    A_ = None
    A_ = BloomTokenizerFast
    A_ = BloomTokenizerFast
    A_ = True
    A_ = False
    A_ = """tokenizer_file"""
    A_ = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}

    def __UpperCAmelCase ( self : int ) -> int:
        """setUp: fetch the bigscience tokenizer and save it to the tmp dir."""
        super().setUp()
        _lowercase : List[str] = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
        tokenizer.save_pretrained(self.tmpdirname )

    def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[str] ) -> Union[str, Any]:
        """Reload the saved tokenizer, merging in the special-tokens map."""
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ )

    def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        """Encode/decode a small batch and compare against known token ids."""
        _lowercase : Optional[Any] = self.get_rust_tokenizer()
        _lowercase : Optional[int] = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        _lowercase : int = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
        _lowercase : Dict = tokenizer.batch_encode_plus(UpperCamelCase_ )['input_ids']
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
        _lowercase : Tuple = tokenizer.batch_decode(UpperCamelCase_ )
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )

    def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : List[Any]=6 ) -> List[str]:
        """Padding behaviour: encoding works with a pad token, and raises
        once the pad token is removed."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _lowercase : Any = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                _lowercase : int = 'This is a simple input'
                _lowercase : Optional[Any] = ['This is a simple input 1', 'This is a simple input 2']
                _lowercase : str = ('This is a simple input', 'This is a pair')
                _lowercase : Optional[Any] = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(UpperCamelCase_ , max_length=UpperCamelCase_ )
                    tokenizer_r.encode_plus(UpperCamelCase_ , max_length=UpperCamelCase_ )
                    tokenizer_r.batch_encode_plus(UpperCamelCase_ , max_length=UpperCamelCase_ )
                    tokenizer_r.encode(UpperCamelCase_ , max_length=UpperCamelCase_ )
                    tokenizer_r.batch_encode_plus(UpperCamelCase_ , max_length=UpperCamelCase_ )
                except ValueError:
                    self.fail('Bloom Tokenizer should be able to deal with padding' )
                _lowercase : Any = None # Hotfixing padding = None
                self.assertRaises(UpperCamelCase_ , tokenizer_r.encode , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='max_length' )
                # Simple input
                self.assertRaises(UpperCamelCase_ , tokenizer_r.encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='max_length' )
                # Simple input
                self.assertRaises(
                    UpperCamelCase_ , tokenizer_r.batch_encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='max_length' , )
                # Pair input
                self.assertRaises(UpperCamelCase_ , tokenizer_r.encode , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='max_length' )
                # Pair input
                self.assertRaises(UpperCamelCase_ , tokenizer_r.encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='max_length' )
                # Pair input
                self.assertRaises(
                    UpperCamelCase_ , tokenizer_r.batch_encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='max_length' , )

    def __UpperCAmelCase ( self : int ) -> Optional[int]:
        """Round-trip multilingual XNLI samples through encode/decode."""
        _lowercase : str = self.get_rust_tokenizer()
        _lowercase : Any = load_dataset('xnli' , 'all_languages' , split='test' , streaming=UpperCamelCase_ )
        _lowercase : str = next(iter(UpperCamelCase_ ) )['premise'] # pick up one data
        _lowercase : Optional[Any] = list(sample_data.values() )
        _lowercase : Optional[Any] = list(map(tokenizer.encode , UpperCamelCase_ ) )
        _lowercase : Any = [tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) for x in output_tokens]
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )

    def __UpperCAmelCase ( self : str ) -> Optional[Any]:
        """The pretrained vocab-files map exposes at least one entry."""
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 4 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Select the tensor-framework tag used by the batch tests below: prefer
# torch ('pt'), then TensorFlow ('tf'), falling back to 'jax'.
# NOTE(review): each branch rebinds ``_A``, but the test code later reads
# ``FRAMEWORK`` (see the `if FRAMEWORK != "jax"` check) — this constant was
# presumably named ``FRAMEWORK`` upstream.  TODO: restore the name.
if is_torch_available():
    _A : List[str] ='''pt'''
elif is_tf_available():
    _A : Tuple ='''tf'''
else:
    _A : Optional[int] ='''jax'''
class lowerCamelCase__ ( A , unittest.TestCase ):
    """Tokenizer test suite for ``ByTaTokenizer`` (byte-level ByT5).

    NOTE(review): this block shows systematic identifier mangling:
    * every test method is named ``__UpperCAmelCase`` — they shadow each
      other and none starts with ``test_``, so unittest will not collect
      them (the original upstream methods had distinct ``test_*`` names);
    * locals are bound to the throwaway name ``_lowercase`` while later
      statements read the upstream names (``tokenizer``, ``toks`` …);
    * ``self.ta_base_tokenizer`` is read but the cached property that
      should define it is mangled too.
    TODO: restore the upstream names before running.
    """
    A_ = ByTaTokenizer
    A_ = False  # NOTE(review): rebinds A_ — originally a second class attribute.

    def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        """setUp: persist a fresh tokenizer into the mixin's tmp dir."""
        super().setUp()
        _lowercase : Any = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def __UpperCAmelCase ( self : int ) -> int:
        """Lazily load the reference google/byt5-small tokenizer."""
        return ByTaTokenizer.from_pretrained('google/byt5-small' )

    def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer:
        """Reload the tokenizer saved in setUp, forwarding extra kwargs."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )

    def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
        """Build a (text, ids) sample of decodable ASCII tokens.

        NOTE(review): the duplicate ``UpperCamelCase_`` parameter names are
        a SyntaxError; the body reads ``tokenizer`` / ``max_length`` /
        ``min_length`` / ``with_prefix_space`` instead.
        """
        _lowercase : Dict = []
        for i in range(len(UpperCamelCase_ ) ):
            try:
                _lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
            except UnicodeDecodeError:
                # Skip byte ids that do not decode to valid text.
                pass
            toks.append((i, tok) )
        # Keep only tokens made of spaces/letters that round-trip 1:1.
        _lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
        _lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
        if max_length is not None and len(UpperCamelCase_ ) > max_length:
            _lowercase : List[Any] = toks[:max_length]
        if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
            while len(UpperCamelCase_ ) < min_length:
                _lowercase : Tuple = toks + toks
        # toks_str = [t[1] for t in toks]
        _lowercase : Dict = [t[0] for t in toks]
        # Ensure consistency
        _lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
        if " " not in output_txt and len(UpperCamelCase_ ) > 1:
            _lowercase : Any = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
            )
        if with_prefix_space:
            _lowercase : Union[str, Any] = ' ' + output_txt
        _lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
        return output_txt, output_ids

    def __UpperCAmelCase ( self : Optional[int] ) -> int:
        """Explicit </s> in the text yields the same ids as auto-added EOS."""
        _lowercase : List[str] = self.ta_base_tokenizer
        _lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
        _lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
        self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )

    def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        """Round-trip multibyte (UTF-8) characters through encode/decode."""
        _lowercase : Optional[int] = self.ta_base_tokenizer
        _lowercase : Tuple = 'Unicode €.'
        _lowercase : List[Any] = tokenizer(UpperCamelCase_ )
        _lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
        # decoding
        _lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
        _lowercase : Any = tokenizer('e è é ê ë' )
        _lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
        # decoding
        _lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )

    def __UpperCAmelCase ( self : Tuple ) -> List[str]:
        """Batch-encode with padding; check ids and tensor shapes."""
        _lowercase : List[Any] = self.ta_base_tokenizer
        _lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        _lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        _lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
        self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
        if FRAMEWORK != "jax":
            _lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
        else:
            _lowercase : List[str] = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
        self.assertEqual((2, 37) , batch.input_ids.shape )
        self.assertEqual((2, 37) , batch.attention_mask.shape )

    def __UpperCAmelCase ( self : Optional[int] ) -> str:
        """An encoder-only call must not return decoder inputs."""
        _lowercase : Union[str, Any] = self.ta_base_tokenizer
        _lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        _lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids' , UpperCamelCase_ )
        self.assertIn('attention_mask' , UpperCamelCase_ )
        self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
        self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )

    def __UpperCAmelCase ( self : Any ) -> int:
        """Targets are padded/truncated to max_length=32."""
        _lowercase : Tuple = self.ta_base_tokenizer
        _lowercase : Optional[Any] = [
            'Summary of the text.',
            'Another summary.',
        ]
        _lowercase : str = tokenizer(
            text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
        self.assertEqual(32 , targets['input_ids'].shape[1] )

    def __UpperCAmelCase ( self : Dict ) -> Tuple:
        """Inputs/targets already ending in ' </s>' keep their explicit EOS."""
        _lowercase : str = self.ta_base_tokenizer
        _lowercase : str = ['A long paragraph for summarization. </s>']
        _lowercase : Optional[int] = ['Summary of the text. </s>']
        # fmt: off
        _lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        _lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        _lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
        self.assertEqual(UpperCamelCase_ , batch['labels'][0] )

    def __UpperCAmelCase ( self : List[str] ) -> int:
        """Save/reload round-trips, added tokens and model_max_length."""
        _lowercase : Dict = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        _lowercase : List[Any] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                _lowercase : List[Any] = tempfile.mkdtemp()
                _lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
                _lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
                tokenizer.save_pretrained(UpperCamelCase_ )
                _lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
                _lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
                self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
                shutil.rmtree(UpperCamelCase_ )
        _lowercase : str = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                _lowercase : Dict = tempfile.mkdtemp()
                _lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'] )
                _lowercase : Optional[int] = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token' )
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
                _lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
                tokenizer.save_pretrained(UpperCamelCase_ )
                _lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
                _lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
                self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
                self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                _lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(UpperCamelCase_ )

    def __UpperCAmelCase ( self : List[str] ) -> Tuple:
        """additional_special_tokens are honoured from the saved json files
        and can be overridden via from_pretrained."""
        _lowercase : List[Any] = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(UpperCamelCase_ )
                with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
                    _lowercase : int = json.load(UpperCamelCase_ )
                with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
                    _lowercase : Tuple = json.load(UpperCamelCase_ )
                _lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
                _lowercase : Any = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                _lowercase : int = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(UpperCamelCase_ , UpperCamelCase_ )
                with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(UpperCamelCase_ , UpperCamelCase_ )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                _lowercase : Optional[Any] = tokenizer_class.from_pretrained(
                    UpperCamelCase_ , )
                self.assertIn(
                    'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                _lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
                _lowercase : Tuple = tokenizer_class.from_pretrained(
                    UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
                self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )

    def __UpperCAmelCase ( self : List[str] ) -> str:
        """Byte id 255 decodes to the empty string after a save/reload."""
        _lowercase : Union[str, Any] = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(UpperCamelCase_ )
                _lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
                self.assertTrue(tokenizer.decode([255] ) == '' )

    def __UpperCAmelCase ( self : Optional[int] ) -> int:
        """Intentionally a no-op for this byte-level tokenizer."""
        pass

    def __UpperCAmelCase ( self : str ) -> Tuple:
        """Intentionally a no-op for this byte-level tokenizer."""
        pass

    def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
        """Intentionally a no-op for this byte-level tokenizer."""
        pass

    def __UpperCAmelCase ( self : List[Any] ) -> int:
        """Intentionally a no-op for this byte-level tokenizer."""
        pass

    def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
        """convert_tokens_to_string returns a plain str."""
        _lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                _lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
                _lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
                self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )

    def __UpperCAmelCase ( self : List[Any] ) -> str:
        """Special-token id setters stay consistent with their token values."""
        _lowercase : Dict = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                _lowercase : Optional[int] = [
                    'bos_token',
                    'eos_token',
                    'unk_token',
                    'sep_token',
                    'pad_token',
                    'cls_token',
                    'mask_token',
                ]
                _lowercase : Optional[int] = 0
                _lowercase : int = tokenizer.convert_ids_to_tokens(
                    UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
                for attr in attributes_list:
                    setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
                    self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
                    self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
                    setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
                    self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
                    self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
                setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] )
                self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] )
                self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] )
                setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
                self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
                self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
class lowerCamelCase__ :
    """Boyer–Moore substring search using only the bad-character heuristic.

    Fixes relative to the original block:
    * ``__init__`` declared two parameters with the same name (a
      SyntaxError) and bound the inputs to throwaway locals, so every
      method failed with AttributeError on ``self.text`` / ``self.pattern``
      / ``self.textLen`` / ``self.patLen``.  The inputs are now stored on
      the instance.
    * All three methods were given the same mangled name
      ``__UpperCAmelCase`` while the bodies (and the module-level driver)
      call ``mismatch_in_text`` / ``match_in_pattern`` /
      ``bad_character_heuristic``.  The real names are restored.
    """

    def __init__(self, text: str, pattern: str) -> None:
        """Store the search text, the pattern, and their lengths."""
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of ``char`` in the pattern, or -1
        when the character does not occur in the pattern."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatching character when
        the pattern is aligned at ``current_pos``, or -1 if the whole
        window matches."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        """Return every alignment at which the pattern occurs in the text."""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                # Classic bad-character shift; kept for parity with the
                # original even though the loop advances one position at a
                # time, so the value is unused.
                _ = mismatch_index - match_index  # shifting index lgtm [py/multiple-definition]
        return positions
# Demo driver for the Boyer–Moore search class defined above.
# NOTE(review): all four constants rebind the same name ``_A``, while the
# statements below read ``text``, ``pattern``, ``bms`` and ``positions`` —
# none of which is defined here — and ``BoyerMooreSearch`` is not the name
# the class above was given.  Presumably the upstream names were ``text`` /
# ``pattern`` / ``bms`` / ``positions`` / ``BoyerMooreSearch``.
# TODO: restore the names so this driver actually runs.
_A : List[str] ='''ABAABA'''
_A : Optional[Any] ='''AB'''
_A : Tuple =BoyerMooreSearch(text, pattern)
_A : Union[str, Any] =bms.bad_character_heuristic()
if len(positions) == 0:
    print('''No match found''')
else:
    print('''Pattern found in following positions: ''')
    print(positions)
| 4 |
'''simple docstring'''
# Notebook-generation constants for the documentation build.
# NOTE(review): all three values rebind ``_A``, and the second one reads
# ``INSTALL_CONTENT``, which is never defined under that name here —
# upstream these were presumably ``INSTALL_CONTENT`` /
# ``notebook_first_cells`` / ``black_avoid_patterns``.  TODO: restore them.
_A : Dict ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_A : Dict ={
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 4 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level constants for the Blenderbot fast tokenizer.
# NOTE(review): every assignment rebinds ``_A``, but the class below reads
# ``VOCAB_FILES_NAMES`` / ``PRETRAINED_VOCAB_FILES_MAP`` /
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`` and ``logger`` — presumably
# the original names of these four constants.  TODO: restore them.
_A : int =logging.get_logger(__name__)
_A : Optional[Any] ={
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}
_A : Any ={
    '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
    '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
    '''tokenizer_config_file''': {
        '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
    },
}
_A : Union[str, Any] ={'''facebook/blenderbot-3B''': 1_2_8}
class lowerCamelCase__ ( A ):
    """Fast (tokenizers-backed) Blenderbot tokenizer.

    NOTE(review): identifiers are mangled — class attributes repeatedly
    rebind ``A_`` while reading upstream constant names, locals are bound
    to ``_lowercase`` but read back as ``pre_tok_state`` / ``state`` /
    ``value`` …, and the mask-token property/setter pair is broken:
    ``@mask_token.setter`` references a ``mask_token`` property that is
    never defined under that name (the getter is called
    ``__UpperCAmelCase``), so evaluating this class body raises NameError.
    TODO: restore the upstream names.
    """
    A_ = VOCAB_FILES_NAMES
    A_ = PRETRAINED_VOCAB_FILES_MAP
    A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A_ = ["""input_ids""", """attention_mask"""]
    A_ = BlenderbotTokenizer

    def __init__( self : str , UpperCamelCase_ : Dict=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Any="replace" , UpperCamelCase_ : Optional[int]="<s>" , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Any="</s>" , UpperCamelCase_ : Dict="<s>" , UpperCamelCase_ : Optional[Any]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : str="<mask>" , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : str=True , **UpperCamelCase_ : str , ) -> Union[str, Any]:
        """Initialise the fast tokenizer and rebuild the backend
        pre-tokenizer / post-processor so they honour the requested
        ``add_prefix_space`` and ``trim_offsets`` settings."""
        super().__init__(
            UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , **UpperCamelCase_ , )
        _lowercase : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        # Rebuild the pre-tokenizer when the stored add_prefix_space differs
        # from the requested one.
        if pre_tok_state.get('add_prefix_space' , UpperCamelCase_ ) != add_prefix_space:
            _lowercase : List[Any] = getattr(UpperCamelCase_ , pre_tok_state.pop('type' ) )
            _lowercase : List[str] = add_prefix_space
            _lowercase : Any = pre_tok_class(**UpperCamelCase_ )
        _lowercase : Tuple = add_prefix_space
        _lowercase : Optional[Any] = 'post_processor'
        _lowercase : Tuple = getattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ )
        if tokenizer_component_instance:
            _lowercase : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                _lowercase : Any = tuple(state['sep'] )
            if "cls" in state:
                _lowercase : int = tuple(state['cls'] )
            _lowercase : Optional[int] = False
            if state.get('add_prefix_space' , UpperCamelCase_ ) != add_prefix_space:
                _lowercase : List[Any] = add_prefix_space
                _lowercase : str = True
            if state.get('trim_offsets' , UpperCamelCase_ ) != trim_offsets:
                _lowercase : Optional[int] = trim_offsets
                _lowercase : Optional[int] = True
            if changes_to_apply:
                # Re-instantiate the post-processor with the updated state.
                _lowercase : str = getattr(UpperCamelCase_ , state.pop('type' ) )
                _lowercase : List[str] = component_class(**UpperCamelCase_ )
                setattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ )

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def __UpperCAmelCase ( self : Dict ) -> str:
        """Return the mask token, or None (with an error log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> Dict:
        """Set the mask token, wrapping plain strings in an lstrip AddedToken
        so the mask also matches a preceding space."""
        _lowercase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else value
        _lowercase : List[Any] = value

    def __UpperCAmelCase ( self : Optional[Any] , *UpperCamelCase_ : Any , **UpperCamelCase_ : Any ) -> BatchEncoding:
        """Batch encode; pretokenized input requires add_prefix_space=True."""
        _lowercase : Union[str, Any] = kwargs.get('is_split_into_words' , UpperCamelCase_ )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )

    def __UpperCAmelCase ( self : Union[str, Any] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Optional[Any] ) -> BatchEncoding:
        """Single encode; same add_prefix_space requirement as above."""
        _lowercase : str = kwargs.get('is_split_into_words' , UpperCamelCase_ )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )

    def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]:
        """Save the backend tokenizer model files into a directory."""
        _lowercase : Union[str, Any] = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
        return tuple(UpperCamelCase_ )

    def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> List[int]:
        """Token-type ids are all zeros (Blenderbot uses no segment ids)."""
        _lowercase : List[str] = [self.sep_token_id]
        _lowercase : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def __UpperCAmelCase ( self : int , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> Any:
        """Append the EOS id to a sequence (no BOS is added)."""
        return token_ids_a + [self.eos_token_id]

    def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : "Conversation" ) -> List[int]:
        """Flatten a Conversation into input ids, trimming from the left to
        fit model_max_length."""
        _lowercase : Optional[int] = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(UpperCamelCase_ )
        _lowercase : Union[str, Any] = ' '.join(UpperCamelCase_ )
        _lowercase : Dict = self.encode(UpperCamelCase_ )
        if len(UpperCamelCase_ ) > self.model_max_length:
            # Keep the most recent turns when the conversation is too long.
            _lowercase : int = input_ids[-self.model_max_length :]
            logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
        return input_ids
| 4 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __UpperCamelCase ( _lowercase ) -> Tuple:
_lowercase : int = torch.exp(_lowercase )
_lowercase : List[str] = torch.sum(_lowercase, dim=1 ) # sum of exp(x_i)
_lowercase : str = torch.sum(x * exp_x, dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowercase ) - B / A
class lowerCamelCase__ ( nn.Module ):
    """DeeBERT encoder: standard BERT layers, each paired with an early-exit
    "highway" head; at inference a forward pass may stop at the first layer
    whose highway-prediction entropy falls below that layer's threshold.

    NOTE(review): identifiers are mangled throughout — every method is
    named ``__UpperCAmelCase`` (the original API was presumably
    ``set_early_exit_entropy`` / ``init_highway_pooler`` / ``forward``),
    locals are bound to ``_lowercase`` but read back under upstream names
    (``config``, ``x``, ``pooler``, ``hidden_states`` …), and
    ``BertHighway`` / ``HighwayException`` / ``entropy`` are referenced but
    not defined in the visible part of the file.  TODO: restore the names.
    """
    def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] ) -> Optional[Any]:
        """Build one BertLayer and one highway head per hidden layer."""
        super().__init__()
        _lowercase : int = config.output_attentions
        _lowercase : int = config.output_hidden_states
        _lowercase : Union[str, Any] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
        _lowercase : List[Any] = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
        # One entropy threshold per layer; -1 disables early exit.
        _lowercase : Tuple = [-1 for _ in range(config.num_hidden_layers )]

    def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str ) -> int:
        """Set the early-exit entropy thresholds: a scalar is broadcast to
        every layer, otherwise a per-layer sequence is stored as-is."""
        if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                _lowercase : Optional[Any] = x
        else:
            _lowercase : Optional[int] = x

    def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[Any] ) -> Dict:
        """Copy the main pooler's weights into every highway pooler."""
        _lowercase : Optional[int] = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )

    def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]:
        """Forward pass through all layers; at inference, raises
        HighwayException to exit early when a layer's highway entropy is
        below its configured threshold."""
        _lowercase : int = ()
        _lowercase : List[Any] = ()
        _lowercase : Tuple = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                _lowercase : Optional[int] = all_hidden_states + (hidden_states,)
            _lowercase : str = layer_module(
                UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ )
            _lowercase : List[str] = layer_outputs[0]
            if self.output_attentions:
                _lowercase : Tuple = all_attentions + (layer_outputs[1],)
            _lowercase : Optional[int] = (hidden_states,)
            if self.output_hidden_states:
                _lowercase : str = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                _lowercase : Optional[int] = current_outputs + (all_attentions,)
            _lowercase : List[Any] = self.highway[i](UpperCamelCase_ )
            # logits, pooled_output
            if not self.training:
                _lowercase : Dict = highway_exit[0]
                _lowercase : Tuple = entropy(UpperCamelCase_ )
                _lowercase : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
                _lowercase : str = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    # Early exit: package current outputs and signal via exception.
                    _lowercase : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(UpperCamelCase_ , i + 1 )
            else:
                _lowercase : Optional[int] = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            _lowercase : str = all_hidden_states + (hidden_states,)
        _lowercase : Optional[Any] = (hidden_states,)
        if self.output_hidden_states:
            _lowercase : Dict = outputs + (all_hidden_states,)
        if self.output_attentions:
            _lowercase : Optional[Any] = outputs + (all_attentions,)
        _lowercase : Optional[int] = outputs + (all_highway_exits,)
        return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    """The Bert Model transformer with early exiting (DeeBERT). """ , A , )
class lowerCamelCase__ ( A ):
    '''DeeBERT: a BERT backbone whose encoder carries highway early-exit heads.

    NOTE(review): machine-mangled code. Assignment targets were rewritten to
    ``_lowercase`` (so attributes such as ``self.embeddings`` are never bound
    as written), several methods share the one mangled name
    ``__UpperCAmelCase`` (later defs shadow earlier ones), the forward
    signature repeats ``UpperCamelCase_`` (a SyntaxError), and the forward
    decorator references an undefined ``UpperCamelCase_`` at class-body level.
    Comments describe the apparent upstream intent (``DeeBertModel``).
    '''

    def __init__( self : int , UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
        '''Build embeddings, the early-exit encoder and the pooler from the config.'''
        super().__init__(UpperCamelCase_ )
        _lowercase : int = config
        _lowercase : int = BertEmbeddings(UpperCamelCase_ )
        _lowercase : List[Any] = DeeBertEncoder(UpperCamelCase_ )
        _lowercase : Any = BertPooler(UpperCamelCase_ )
        self.init_weights()

    def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
        '''Initialize every highway pooler from the main pooler's weights.'''
        self.encoder.init_highway_pooler(self.pooler )

    def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
        '''Return the word-embedding table.'''
        return self.embeddings.word_embeddings

    def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict ) -> Any:
        '''Replace the word-embedding table (mangled: ``value`` is undefined here).'''
        _lowercase : Optional[Any] = value

    def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> Union[str, Any]:
        '''Prune attention heads; expects a mapping of layer index -> head list.'''
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ )

    @add_start_docstrings_to_model_forward(UpperCamelCase_ )
    def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , ) -> Union[str, Any]:
        '''BERT forward pass; the encoder may raise HighwayException to exit early.'''
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
        elif input_ids is not None:
            _lowercase : Any = input_ids.size()
        elif inputs_embeds is not None:
            _lowercase : Any = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds' )
        _lowercase : str = input_ids.device if input_ids is not None else inputs_embeds.device
        # Default masks / segment ids when the caller does not supply them.
        if attention_mask is None:
            _lowercase : Tuple = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
        if encoder_attention_mask is None:
            _lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
        if token_type_ids is None:
            _lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        _lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length].
        if encoder_attention_mask.dim() == 3:
            _lowercase : int = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            _lowercase : int = encoder_attention_mask[:, None, None, :]
        _lowercase : str = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype ) # fp16 compatibility
        # Turn the 0/1 mask into an additive -1e4 bias on masked positions.
        _lowercase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -1_00_00.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        _lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
        _lowercase : Dict = self.embeddings(
            input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
        _lowercase : List[Any] = self.encoder(
            UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
        _lowercase : int = encoder_outputs[0]
        _lowercase : str = self.pooler(UpperCamelCase_ )
        _lowercase : List[Any] = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ] # add hidden_states and attentions if they are here
        return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowerCamelCase__ ( A ):
    '''Exception used to abort the encoder forward pass on a highway early exit.

    Carries the highway branch's outputs (``message``) and the 1-based index of
    the layer that exited (``exit_layer``); the sequence-classification model
    catches this exception and reads both attributes (``e.message`` /
    ``e.exit_layer``).
    '''

    def __init__( self : Dict , message : List[str] , exit_layer : Dict ) -> None:
        # Fixes the mangled original, which declared two parameters with the
        # same name (a SyntaxError) and bound a throwaway local instead of the
        # attributes that callers read.
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[str] ) -> Dict:
'''simple docstring'''
super().__init__()
_lowercase : Optional[Any] = BertPooler(UpperCamelCase_ )
_lowercase : List[Any] = nn.Dropout(config.hidden_dropout_prob )
_lowercase : int = nn.Linear(config.hidden_size , config.num_labels )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_lowercase : str = encoder_outputs[0]
_lowercase : int = self.pooler(UpperCamelCase_ )
# "return" pooler_output
# BertModel
_lowercase : Optional[int] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowercase : Dict = bmodel_output[1]
_lowercase : Union[str, Any] = self.dropout(UpperCamelCase_ )
_lowercase : str = self.classifier(UpperCamelCase_ )
return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , A , )
class lowerCamelCase__ ( A ):
    '''DeeBERT sequence classifier with per-layer highway early-exit heads.

    NOTE(review): machine-mangled code. The forward signature repeats
    ``UpperCamelCase_`` (a SyntaxError) and assignment targets were rewritten
    to ``_lowercase`` while later lines read the intended names. Comments
    describe the apparent upstream intent
    (``DeeBertForSequenceClassification``).
    '''

    def __init__( self : int , UpperCamelCase_ : List[Any] ) -> List[str]:
        '''Build the DeeBERT backbone plus dropout and the final classifier.'''
        super().__init__(UpperCamelCase_ )
        _lowercase : Dict = config.num_labels
        _lowercase : Any = config.num_hidden_layers
        _lowercase : Optional[int] = DeeBertModel(UpperCamelCase_ )
        _lowercase : Any = nn.Dropout(config.hidden_dropout_prob )
        _lowercase : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()

    @add_start_docstrings_to_model_forward(UpperCamelCase_ )
    def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=-1 , UpperCamelCase_ : Union[str, Any]=False , ) -> Tuple:
        '''Classification forward pass; highway early exits are caught and reused.'''
        _lowercase : Union[str, Any] = self.num_layers
        try:
            _lowercase : Tuple = self.bert(
                UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            _lowercase : List[Any] = outputs[1]
            _lowercase : int = self.dropout(UpperCamelCase_ )
            _lowercase : Optional[int] = self.classifier(UpperCamelCase_ )
            _lowercase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
        except HighwayException as e:
            # A highway head exited early: its captured outputs replace the
            # full pass, and the exiting layer index is recorded.
            _lowercase : Union[str, Any] = e.message
            _lowercase : Any = e.exit_layer
            _lowercase : Optional[int] = outputs[0]
        if not self.training:
            _lowercase : Union[str, Any] = entropy(UpperCamelCase_ )
            _lowercase : Tuple = []
            _lowercase : Tuple = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                _lowercase : Tuple = MSELoss()
                _lowercase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                _lowercase : Union[str, Any] = CrossEntropyLoss()
                _lowercase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            _lowercase : Optional[Any] = []
            for highway_exit in outputs[-1]:
                _lowercase : Optional[Any] = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(UpperCamelCase_ )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    _lowercase : Union[str, Any] = MSELoss()
                    _lowercase : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    _lowercase : Dict = CrossEntropyLoss()
                    _lowercase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(UpperCamelCase_ )
            if train_highway:
                # Sum only the highway losses when training the exit heads.
                _lowercase : List[str] = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                _lowercase : Optional[Any] = (loss,) + outputs
        if not self.training:
            _lowercase : List[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                _lowercase : Dict = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                ) # use the highway of the last layer
        return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 4 | 1 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_A : Any =logging.get_logger(__name__)
# NOTE(review): machine-mangled module constants — every assignment below
# rebinds ``_A``, so only the last value survives and the names the tokenizer
# class reads (``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP``, ...) are
# never actually defined here.
# Vocabulary file name expected next to a checkpoint.
_A : Dict ={'''vocab_file''': '''spiece.model'''}
# Pretrained checkpoint name -> SentencePiece model URL.
_A : Union[str, Any] ={
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}
# Positional-embedding sizes (None: no fixed maximum length).
_A : Tuple ={
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}
# Segments (not really needed)
_A : List[str] =0
_A : List[str] =1
_A : List[str] =2
_A : Tuple =3
_A : Optional[int] =4
class lowerCamelCase__ ( A ):
    '''SentencePiece-based XLNet tokenizer (pads on the left).

    NOTE(review): machine-mangled code. Assignment targets were rewritten to
    ``_lowercase`` while later lines read the intended names, ``__init__`` and
    several other methods repeat the parameter name ``UpperCamelCase_`` (a
    SyntaxError), methods share one mangled name, and one ``super()`` call
    repeats a keyword argument. Comments describe the apparent upstream intent
    (transformers ``XLNetTokenizer``) — confirm against upstream.
    '''

    A_ = VOCAB_FILES_NAMES
    A_ = PRETRAINED_VOCAB_FILES_MAP
    A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # XLNet expects left-side padding.
    A_ = """left"""

    def __init__( self : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=False , UpperCamelCase_ : int=True , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Optional[Any]="<unk>" , UpperCamelCase_ : int="<sep>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : Optional[int]="<cls>" , UpperCamelCase_ : int="<mask>" , UpperCamelCase_ : Optional[int]=["<eop>", "<eod>"] , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : List[Any] , ) -> None:
        '''Load the SentencePiece model and configure the special tokens.'''
        # Mask token behaves like a normal word preceded by a space (lstrip).
        _lowercase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
        _lowercase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
        # XLNet reserves 3 special-token ids at the tail of a sequence.
        _lowercase : List[Any] = 3
        _lowercase : List[str] = do_lower_case
        _lowercase : List[Any] = remove_space
        _lowercase : Union[str, Any] = keep_accents
        _lowercase : Optional[Any] = vocab_file
        _lowercase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(UpperCamelCase_ )

    @property
    def __UpperCAmelCase ( self : List[Any] ) -> Dict:
        '''Vocabulary size, i.e. the size of the SentencePiece model.'''
        return len(self.sp_model )

    def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
        '''Return the token -> id mapping, including added tokens.'''
        _lowercase : Tuple = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : List[str] ) -> Union[str, Any]:
        '''Drop the unpicklable SentencePiece processor before pickling.'''
        _lowercase : int = self.__dict__.copy()
        _lowercase : Optional[int] = None
        return state

    def __setstate__( self : List[Any] , UpperCamelCase_ : Tuple ) -> List[str]:
        '''Restore pickled state and reload the SentencePiece model.'''
        _lowercase : Optional[int] = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            _lowercase : int = {}
        _lowercase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Union[str, Any] ) -> int:
        '''Normalize raw text (whitespace, quote style, accents, case).'''
        if self.remove_space:
            _lowercase : Optional[int] = ' '.join(inputs.strip().split() )
        else:
            _lowercase : List[str] = inputs
        _lowercase : int = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
        if not self.keep_accents:
            # Strip combining marks after NFKD decomposition.
            _lowercase : Tuple = unicodedata.normalize('NFKD' , UpperCamelCase_ )
            _lowercase : Any = ''.join([c for c in outputs if not unicodedata.combining(UpperCamelCase_ )] )
        if self.do_lower_case:
            _lowercase : int = outputs.lower()
        return outputs

    def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : str ) -> List[str]:
        '''Tokenize into SentencePiece pieces, re-splitting trailing digit+comma
        pieces so the comma becomes a separate piece.'''
        _lowercase : Tuple = self.preprocess_text(UpperCamelCase_ )
        _lowercase : Dict = self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
        _lowercase : List[Any] = []
        for piece in pieces:
            if len(UpperCamelCase_ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
                _lowercase : int = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase_ , '' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        _lowercase : Optional[int] = cur_pieces[1:]
                    else:
                        _lowercase : Tuple = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(UpperCamelCase_ )
            else:
                new_pieces.append(UpperCamelCase_ )
        return new_pieces

    def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Optional[Any] ) -> int:
        '''Token string -> vocabulary id.'''
        return self.sp_model.PieceToId(UpperCamelCase_ )

    def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Optional[Any] ) -> Union[str, Any]:
        '''Vocabulary id -> token string.'''
        return self.sp_model.IdToPiece(UpperCamelCase_ )

    def __UpperCAmelCase ( self : int , UpperCamelCase_ : Any ) -> List[str]:
        '''Join pieces and turn the SentencePiece underline back into spaces.'''
        _lowercase : List[Any] = ''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ' ' ).strip()
        return out_string

    def __UpperCAmelCase ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = None , UpperCamelCase_ : bool = True , **UpperCamelCase_ : Union[str, Any] , ) -> str:
        '''Decode a list of ids to text, keeping added tokens as separate chunks.'''
        _lowercase : Union[str, Any] = kwargs.pop('use_source_tokenizer' , UpperCamelCase_ )
        _lowercase : int = self.convert_ids_to_tokens(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        _lowercase : List[Any] = []
        _lowercase : int = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(UpperCamelCase_ ) )
                    _lowercase : Optional[int] = []
                sub_texts.append(UpperCamelCase_ )
            else:
                current_sub_text.append(UpperCamelCase_ )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(UpperCamelCase_ ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        _lowercase : Tuple = ''.join(UpperCamelCase_ )
        _lowercase : int = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            _lowercase : List[str] = self.clean_up_tokenization(UpperCamelCase_ )
            return clean_text
        else:
            return text

    def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> List[int]:
        '''XLNet format: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>`` (cls last).'''
        _lowercase : Any = [self.sep_token_id]
        _lowercase : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ) -> List[int]:
        '''Mask with 1 at special-token positions (trailing sep/cls), 0 elsewhere.'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
        if token_ids_a is not None:
            return ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1, 1]
        return ([0] * len(UpperCamelCase_ )) + [1, 1]

    def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> List[int]:
        '''Segment ids: 0 for sequence A (+sep), 1 for B (+sep), 2 for the cls.'''
        _lowercase : Union[str, Any] = [self.sep_token_id]
        _lowercase : Union[str, Any] = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]:
        '''Copy (or serialize) the SentencePiece model into ``save_directory``.'''
        if not os.path.isdir(UpperCamelCase_ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _lowercase : List[Any] = os.path.join(
            UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCamelCase_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(UpperCamelCase_ , 'wb' ) as fi:
                _lowercase : List[Any] = self.sp_model.serialized_model_proto()
                fi.write(UpperCamelCase_ )
        return (out_vocab_file,)
| 4 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int ) -> Any:
'''simple docstring'''
_lowercase : List[Any] = [10, 20, 30, 40, 50, 60]
_lowercase : Tuple = [2, 4, 6, 8, 10, 12]
_lowercase : Optional[Any] = 100
self.assertEqual(kp.calc_profit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 210 )
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'Weight can not be negative.' )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'Profit can not be negative.' )
def __UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
self.assertRaisesRegex(
UpperCamelCase_ , 'The length of profit and weight must be same.' )
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 4 | 1 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Repository root (three directory levels up from this test file); its
# ``utils`` directory is added to sys.path so ``check_copies`` can be imported.
# NOTE(review): mangled — the path is bound to ``_A`` but read back below as
# ``git_repo_path``, which is never defined.
_A : Optional[int] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402

# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_A : Optional[Any] =''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class lowerCamelCase__ ( unittest.TestCase ):
    '''Tests for the repo's ``check_copies`` utility ("# Copied from" checker).

    NOTE(review): machine-mangled code. The methods all share one mangled name
    (so only the last binding survives and unittest would not discover them),
    the helper method repeats the parameter name ``UpperCamelCase_`` (a
    SyntaxError), and assignment targets were rewritten to ``_lowercase`` while
    later lines read the intended names (e.g. ``self.transformer_dir``).
    Comments describe the apparent upstream intent.
    '''

    def __UpperCAmelCase ( self : Tuple ) -> Dict:
        '''setUp: build a temporary "transformers" tree holding modeling_bert.py.'''
        _lowercase : Optional[Any] = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) )
        _lowercase : List[str] = self.transformer_dir
        shutil.copy(
            os.path.join(UpperCamelCase_ , 'src/transformers/models/bert/modeling_bert.py' ) , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) , )

    def __UpperCAmelCase ( self : int ) -> Any:
        '''tearDown: reset the module path and remove the temporary tree.'''
        _lowercase : List[Any] = 'src/transformers'
        shutil.rmtree(self.transformer_dir )

    def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=None ) -> str:
        '''Helper: write code under a "# Copied from" comment, black-format it,
        then assert check_copies accepts it (or rewrites it to the overwrite).'''
        _lowercase : Optional[Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            _lowercase : Dict = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        # Format exactly as the repo does (black, 119-column lines).
        _lowercase : Tuple = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
        _lowercase : Any = black.format_str(UpperCamelCase_ , mode=UpperCamelCase_ )
        _lowercase : List[Any] = os.path.join(self.transformer_dir , 'new_code.py' )
        with open(UpperCamelCase_ , 'w' , newline='\n' ) as f:
            f.write(UpperCamelCase_ )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(UpperCamelCase_ ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=UpperCamelCase_ )
            with open(UpperCamelCase_ , 'r' ) as f:
                self.assertTrue(f.read() , UpperCamelCase_ )

    def __UpperCAmelCase ( self : Tuple ) -> int:
        '''find_code_in_transformers should return BertLMPredictionHead's source.'''
        _lowercase : str = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
        self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )

    def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
        '''is_copy_consistent across plain copies, renames, and overwrites.'''
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , UpperCamelCase_ , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , UpperCamelCase_ ) , )
        # Copy consistency with a really long name
        _lowercase : Optional[Any] = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub('Bert' , UpperCamelCase_ , UpperCamelCase_ ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , UpperCamelCase_ , overwrite_result=re.sub('Bert' , 'TestModel' , UpperCamelCase_ ) , )

    def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        '''convert_to_localized_md: sync the model list into the zh-hans README.'''
        _lowercase : List[Any] = check_copies.LOCALIZED_READMES['README_zh-hans.md']
        _lowercase : Union[str, Any] = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
            ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
            ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
            ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
            ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
            ' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
            ' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
            ' method has been applied to compress GPT2 into'
            ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
            ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
            ' Multilingual BERT into'
            ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
            ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
            ' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
            ' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
            ' Luong, Quoc V. Le, Christopher D. Manning.'
        )
        _lowercase : Union[str, Any] = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        _lowercase : Union[str, Any] = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
            ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
            ' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
            ' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
            ' method has been applied to compress GPT2 into'
            ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
            ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
            ' Multilingual BERT into'
            ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
            ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
            ' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
            ' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
            ' Christopher D. Manning 发布。\n'
        )
        _lowercase , _lowercase : List[Any] = check_copies.convert_to_localized_md(
            UpperCamelCase_ , UpperCamelCase_ , localized_readme['format_model_list'] )
        self.assertFalse(UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
        _lowercase , _lowercase : str = check_copies.convert_to_localized_md(
            UpperCamelCase_ , UpperCamelCase_ , localized_readme['format_model_list'] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(UpperCamelCase_ )
        _lowercase : int = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
            ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
            ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
            ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
        )
        _lowercase : int = (
            '1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
            ' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        _lowercase : int = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        _lowercase , _lowercase : Tuple = check_copies.convert_to_localized_md(
            UpperCamelCase_ , UpperCamelCase_ , localized_readme['format_model_list'] )
        # Check if the model link is synchronized.
        self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 4 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazily-imported public API for the XLNet model family.  Heavy submodules are
# only imported on first attribute access via ``_LazyModule``; each optional
# backend (sentencepiece / tokenizers / torch / tensorflow) contributes its
# symbols only when it is importable.
#
# Fixes the mangled original, which rebound ``_A`` for every sub-list (losing
# all but the last) and then read an undefined ``_import_structure`` name — a
# NameError at import time — so the lazy module was never installed.
_A : Optional[Any] ={'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
_import_structure = _A

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_xlnet'''] = ['''XLNetTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_xlnet_fast'''] = ['''XLNetTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_xlnet'''] = [
        '''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLNetForMultipleChoice''',
        '''XLNetForQuestionAnswering''',
        '''XLNetForQuestionAnsweringSimple''',
        '''XLNetForSequenceClassification''',
        '''XLNetForTokenClassification''',
        '''XLNetLMHeadModel''',
        '''XLNetModel''',
        '''XLNetPreTrainedModel''',
        '''load_tf_weights_in_xlnet''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_xlnet'''] = [
        '''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXLNetForMultipleChoice''',
        '''TFXLNetForQuestionAnsweringSimple''',
        '''TFXLNetForSequenceClassification''',
        '''TFXLNetForTokenClassification''',
        '''TFXLNetLMHeadModel''',
        '''TFXLNetMainLayer''',
        '''TFXLNetModel''',
        '''TFXLNetPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 1 |
'''Prefix-trie based autocomplete utilities.'''
from __future__ import annotations

# Sentinel key marking the end of a stored word inside the trie.  The mangled
# original bound only ``_A`` while later code reads ``END``; define both names.
_A : str ='''#'''
END = _A
class lowerCamelCase__ :
    '''A prefix trie of words with autocomplete-style suffix lookup.

    Rewritten from the mangled original, whose ``__init__`` never assigned
    ``self._trie``, whose three methods all shared one mangled name (so only
    the last survived, breaking the ``insert_word``/``find_word`` call sites
    below the class), and whose ``_elements`` recursed on its own argument
    instead of the child node (never terminating).
    '''

    # End-of-word sentinel key (same value as the module-level '#' constant).
    _END = '''#'''

    def __init__( self : Any ) -> None:
        '''Create an empty trie (nested-dict representation).'''
        self._trie: dict = {}

    def insert_word( self : Any , text : str ) -> None:
        '''Store ``text``, one nested dict level per character, ending with the sentinel.'''
        node = self._trie
        for char in text:
            if char not in node:
                node[char] = {}
            node = node[char]
        node[self._END] = True

    def find_word( self : Any , prefix : str ) -> tuple | list:
        '''Return every suffix completing ``prefix`` (each with a trailing
        space), or ``[]`` when the prefix is not present.'''
        node = self._trie
        for char in prefix:
            if char not in node:
                return []
            node = node[char]
        return self._elements(node )

    def _elements( self : Any , d : dict ) -> tuple:
        '''Depth-first collection of all suffixes stored below node ``d``.'''
        result = []
        for c, v in d.items():
            # A sentinel key contributes a bare space; otherwise prepend the
            # character to every suffix of the *child* node v.
            sub_result = [' '] if c == self._END else [(c + s) for s in self._elements(v )]
            result.extend(sub_result )
        return tuple(result )
# Module-level demo trie preloaded with sample words.
# NOTE(review): the class above is named ``lowerCamelCase__`` and the tuple is
# bound to ``_A``, so the ``Trie()`` call, the ``words`` loop variable, and
# ``trie`` all reference names never defined in this file (mangling artifact).
_A : Tuple =Trie()
_A : List[Any] =('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''')
for word in words:
    trie.insert_word(word)
def __UpperCamelCase ( string: str ) -> tuple:
    """Return every word in the module-level ``trie`` that starts with ``string``.

    Each returned entry is the full word (prefix + suffix); words carry a
    trailing space produced by the trie's end-of-word sentinel.
    """
    # ``trie`` is the module-level demo trie built above.
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
def __UpperCamelCase ( ) -> None:
    # Demo entry point: print all completions of the prefix "de".
    # NOTE(review): ``autocomplete_using_trie`` is not defined under that
    # name in this file (the function above was renamed by mangling).
    print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): ``main`` is likewise undefined under that name here.
    main()
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
_A : Optional[Any] =logging.get_logger(__name__)
# Map of canonical MarkupLM checkpoint names to their hosted config URLs.
_A : Optional[int] ={
    '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
    '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
    """Configuration for MarkupLM models.

    Holds the transformer hyper-parameters plus MarkupLM-specific XPath
    embedding settings (tag/subscript unit embeddings, padding ids, depth).
    The mangled original repeated one parameter name (a SyntaxError) and
    never assigned the arguments to ``self``; both are restored here.
    """

    # NOTE(review): upstream this attribute is ``model_type``; the mangled
    # identifier ``A_`` is kept to preserve the visible interface.
    A_ = """markuplm"""

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        """Store all hyper-parameters and forward the special-token ids to the base config."""
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties (MarkupLM-specific XPath embedding settings)
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 4 | 1 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCamelCase__ ( A , A , A ):
    """Prefix-conditioned text decoder: a GPT-2 LM head fed by linear
    projections of an external prefix embedding (e.g. a CLIP feature).

    NOTE(review): identifiers in this block were machine-mangled — the
    method signatures repeat the parameter name ``UpperCamelCase_`` and most
    locals were collapsed into ``_lowercase`` — so the code does not run as
    written. Comments below describe the apparent intent of each step.
    """
    # Presumably patterns of checkpoint keys to ignore on load — TODO confirm.
    A_ = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]
    @register_to_config
    def __init__( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : int = 5_0257 , UpperCamelCase_ : int = 1024 , UpperCamelCase_ : int = 768 , UpperCamelCase_ : int = 12 , UpperCamelCase_ : int = 12 , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : str = "gelu_new" , UpperCamelCase_ : float = 0.1 , UpperCamelCase_ : float = 0.1 , UpperCamelCase_ : float = 0.1 , UpperCamelCase_ : float = 1E-5 , UpperCamelCase_ : float = 0.02 , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , ) -> List[Any]:
        """Build the prefix encode/decode projections and the GPT-2 backbone."""
        super().__init__()
        _lowercase : str = prefix_length
        # A hidden bottleneck is required when the prefix inner dim differs
        # from the transformer embedding dim.
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
                F''' `n_embd`: {n_embd} are not equal.''' )
        _lowercase : List[Any] = prefix_inner_dim
        _lowercase : Optional[int] = prefix_hidden_dim
        # encode/decode projections are identity when no bottleneck is used.
        _lowercase : int = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        _lowercase : Optional[int] = (
            nn.Linear(self.prefix_hidden_dim , UpperCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        _lowercase : Optional[int] = GPTaConfig(
            vocab_size=UpperCamelCase_ , n_positions=UpperCamelCase_ , n_embd=UpperCamelCase_ , n_layer=UpperCamelCase_ , n_head=UpperCamelCase_ , n_inner=UpperCamelCase_ , activation_function=UpperCamelCase_ , resid_pdrop=UpperCamelCase_ , embd_pdrop=UpperCamelCase_ , attn_pdrop=UpperCamelCase_ , layer_norm_epsilon=UpperCamelCase_ , initializer_range=UpperCamelCase_ , scale_attn_weights=UpperCamelCase_ , use_cache=UpperCamelCase_ , scale_attn_by_inverse_layer_idx=UpperCamelCase_ , reorder_and_upcast_attn=UpperCamelCase_ , )
        _lowercase : Union[str, Any] = GPTaLMHeadModel(UpperCamelCase_ )
    def __UpperCAmelCase ( self : str , UpperCamelCase_ : torch.Tensor , UpperCamelCase_ : torch.Tensor , UpperCamelCase_ : Optional[torch.Tensor] = None , UpperCamelCase_ : Optional[torch.Tensor] = None , ) -> Optional[Any]:
        """Forward: embed tokens, project the prefix, concatenate, run GPT-2."""
        _lowercase : Optional[int] = self.transformer.transformer.wte(UpperCamelCase_ )
        _lowercase : Any = self.encode_prefix(UpperCamelCase_ )
        _lowercase : Union[str, Any] = self.decode_prefix(UpperCamelCase_ )
        _lowercase : Dict = torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            # Dummy zero tokens pad the label tensor over the prefix positions.
            _lowercase : List[str] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            _lowercase : int = torch.cat((dummy_token, input_ids) , dim=1 )
        _lowercase : Dict = self.transformer(inputs_embeds=UpperCamelCase_ , labels=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : torch.device ) -> torch.Tensor:
        """Return a zero (batch, prefix_length) tensor used as label padding."""
        return torch.zeros(UpperCamelCase_ , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase_ )
    def __UpperCAmelCase ( self : int , UpperCamelCase_ : Optional[Any] ) -> Union[str, Any]:
        """Project a raw prefix feature into the encoder's hidden space."""
        return self.encode_prefix(UpperCamelCase_ )
    @torch.no_grad()
    def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] ) -> int:
        """Generate one caption per input feature via beam search; returns (tokens, lengths)."""
        _lowercase : List[str] = torch.split(UpperCamelCase_ , 1 , dim=0 )
        _lowercase : List[Any] = []
        _lowercase : Tuple = []
        for feature in features:
            _lowercase : Dict = self.decode_prefix(feature.to(UpperCamelCase_ ) ) # back to the clip feature
            # Only support beam search for now
            _lowercase , _lowercase : Any = self.generate_beam(
                input_embeds=UpperCamelCase_ , device=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        _lowercase : Union[str, Any] = torch.stack(UpperCamelCase_ )
        _lowercase : List[Any] = torch.stack(UpperCamelCase_ )
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def __UpperCAmelCase ( self : Any , UpperCamelCase_ : int=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int = 5 , UpperCamelCase_ : int = 67 , UpperCamelCase_ : float = 1.0 , UpperCamelCase_ : Optional[int] = None , ) -> str:
        """Beam search over the GPT-2 head, starting from prefix embeddings.

        Beams are scored by length-normalized cumulative log-probability and
        returned sorted best-first together with their sequence lengths.
        """
        _lowercase : Dict = eos_token_id
        _lowercase : Tuple = None
        _lowercase : Optional[int] = None
        _lowercase : List[Any] = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ , dtype=torch.int )
        _lowercase : Optional[int] = torch.zeros(UpperCamelCase_ , device=UpperCamelCase_ , dtype=torch.bool )
        if input_embeds is not None:
            _lowercase : Dict = input_embeds
        else:
            _lowercase : Optional[int] = self.transformer.transformer.wte(UpperCamelCase_ )
        for i in range(UpperCamelCase_ ):
            _lowercase : List[Any] = self.transformer(inputs_embeds=UpperCamelCase_ )
            _lowercase : List[Any] = outputs.logits
            # Temperature-scaled next-token distribution, in log space.
            _lowercase : Tuple = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            _lowercase : List[str] = logits.softmax(-1 ).log()
            if scores is None:
                # First step: seed one beam per top-k token.
                _lowercase , _lowercase : Optional[int] = logits.topk(UpperCamelCase_ , -1 )
                _lowercase : Optional[Any] = generated.expand(UpperCamelCase_ , *generated.shape[1:] )
                _lowercase , _lowercase : Optional[int] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    _lowercase : List[str] = next_tokens
                else:
                    _lowercase : Union[str, Any] = tokens.expand(UpperCamelCase_ , *tokens.shape[1:] )
                    _lowercase : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # Later steps: extend each live beam and re-select the top-k
                # by length-normalized cumulative log-probability.
                _lowercase : Optional[Any] = -float(np.inf )
                _lowercase : Any = 0
                _lowercase : Dict = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                _lowercase : List[Any] = scores_sum / seq_lengths[:, None]
                _lowercase , _lowercase : Optional[Any] = scores_sum_average.view(-1 ).topk(UpperCamelCase_ , -1 )
                # Recover which source beam each surviving candidate came from.
                _lowercase : str = next_tokens // scores_sum.shape[1]
                _lowercase : int = seq_lengths[next_tokens_source]
                _lowercase : List[str] = next_tokens % scores_sum.shape[1]
                _lowercase : Optional[int] = next_tokens.unsqueeze(1 )
                _lowercase : Optional[Any] = tokens[next_tokens_source]
                _lowercase : Optional[int] = torch.cat((tokens, next_tokens) , dim=1 )
                _lowercase : Optional[int] = generated[next_tokens_source]
                _lowercase : List[Any] = scores_sum_average * seq_lengths
                _lowercase : Dict = is_stopped[next_tokens_source]
            _lowercase : Tuple = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            _lowercase : Dict = torch.cat((generated, next_token_embed) , dim=1 )
            # A beam stops once it has emitted the EOS token.
            _lowercase : List[Any] = is_stopped + next_tokens.eq(UpperCamelCase_ ).squeeze()
            if is_stopped.all():
                break
        _lowercase : int = scores / seq_lengths
        _lowercase : Optional[int] = scores.argsort(descending=UpperCamelCase_ )
        # tokens tensors are already padded to max_seq_length
        _lowercase : Any = [tokens[i] for i in order]
        _lowercase : Tuple = torch.stack(UpperCamelCase_ , dim=0 )
        _lowercase : Optional[int] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 4 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __UpperCamelCase ( checkpoint_url ):
    """Build a SwinaSRConfig matching the model variant encoded in the URL.

    The mangled original assigned every value to a throwaway local instead of
    the config; the attribute assignments are restored here.
    """
    config = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        # Lightweight variant uses a smaller backbone.
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        # JPEG artifact-reduction model: single channel, no upscaling.
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""
    return config
def __UpperCamelCase ( name, config ) -> str:
    """Map an original Swin2SR state-dict key to its Transformers name.

    Applies a sequence of substring rewrites; head/upsampler keys are routed
    by ``config.upsampler``, everything else is prefixed with ``swin2sr.``.
    The mangled original discarded every ``replace`` result; the
    ``name = ...`` rebindings are restored here.
    """
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name
    return name
def __UpperCamelCase ( orig_state_dict, config ):
    """Rewrite an original Swin2SR state dict in place to Transformers keys.

    Fused ``qkv`` projections are split into separate query/key/value
    tensors; all other keys go through ``rename_key``. The mangled original
    dropped every converted tensor; the dict writes are restored here.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim
            if "weight" in key:
                # Fused qkv weight rows are laid out [query; key; value].
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            # NOTE(review): relies on the sibling renaming helper being
            # available under the name ``rename_key`` at module level.
            orig_state_dict[rename_key(key, config)] = val
    return orig_state_dict
def __UpperCamelCase ( checkpoint_url, pytorch_dump_folder_path, push_to_hub ) -> None:
    """Convert an original Swin2SR checkpoint to the Transformers format.

    Downloads the checkpoint, remaps its state dict, verifies the model
    output on a sample image against hard-coded expected values, then
    optionally saves the model/processor and pushes them to the hub. The
    mangled original collapsed every local into one name (even passing it
    as ``strict=``); distinct locals are restored here.
    """
    config = get_config(checkpoint_url)
    model = SwinaSRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = convert_state_dict(state_dict, config)
    # strict=False: position-index/mask buffers are recomputed, not loaded.
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)
    if config.num_channels == 1:
        # JPEG-artifact model is single-channel: keep only one channel.
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
    # CLI for converting an original Swin2SR checkpoint to the HF format.
    # NOTE(review): the parser is bound to ``_A`` but used as ``parser``, and
    # the parsed namespace is bound to ``_A`` but used as ``args`` — both are
    # mangling artifacts; confirm against the upstream script.
    _A : Dict =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
        type=str,
        help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
    _A : int =parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 1 |
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def __UpperCamelCase ( a ) -> None:
    """Sort a non-empty list of integers ``a`` in place via pigeonhole sort.

    Counts occurrences per value offset from the minimum, then writes the
    values back in order. The mangled original never wrote into ``a`` and
    used an unbound index; both are restored here.
    """
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def __UpperCamelCase ( ) -> None:
    """Demo: sort a sample list in place and print it."""
    a = [8, 3, 2, 7, 4, 6, 8]
    # NOTE(review): relies on the sorting function being available under the
    # name ``pigeonhole_sort`` at module level.
    pigeonhole_sort(a)
    # Elements are ints, so convert before joining (str.join needs strings).
    print('Sorted order is:', ' '.join(str(x) for x in a))


if __name__ == "__main__":
    # NOTE(review): relies on the entry point being named ``main``.
    main()
| 4 |
'''simple docstring'''
def __UpperCamelCase ( word: str, max_width: int ) -> list:
    """Split ``word`` into fully-justified lines of width ``max_width``.

    Extra spaces on each line are distributed left-to-right between words;
    a single-word line (and the final line) is padded on the right. The
    mangled original never bound its parameters or locals; the bindings
    are restored here.
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        # Spaces still needed for this line to reach max_width.
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * ' ')
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(' '.join(line) + (remaining_spaces + 1) * ' ')
    return answer
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
# Declares the lazily-importable API of the onnx subpackage.
# NOTE(review): upstream binds this dict to ``_import_structure``; here it is
# bound to ``_A`` yet still referenced as ``_import_structure`` below —
# a mangling artifact, confirm against upstream.
_A : Union[str, Any] ={
    '''config''': [
        '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
        '''OnnxConfig''',
        '''OnnxConfigWithPast''',
        '''OnnxSeq2SeqConfigWithPast''',
        '''PatchingSpec''',
    ],
    '''convert''': ['''export''', '''validate_model_outputs'''],
    '''features''': ['''FeaturesManager'''],
    '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeqaSeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys
    # NOTE(review): upstream assigns to ``sys.modules[__name__]``.
    _A : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
import os
from collections.abc import Iterator
def __UpperCamelCase ( top_dir: str = "." ) -> Iterator[str]:
    """Yield relative paths of all ``.py``/``.ipynb`` files under ``top_dir``.

    Prunes ``scripts`` directories and hidden/underscore-prefixed
    directories, and skips ``__init__.py`` files. The mangled original
    discarded the pruned directory list and split the wrong value; both
    are restored here.
    """
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Mutate dir_names in place so os.walk skips excluded directories.
        dir_names[:] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip('./')
def __UpperCamelCase ( i: int ) -> str:
    """Return the markdown bullet prefix for nesting depth ``i``.

    Depth 0 produces a new ``##`` header; deeper levels produce an
    indented ``*`` bullet. The mangled original referenced an unbound
    name; the parameter is restored.
    """
    return f'''{i * " "}*''' if i else "\n##"
def __UpperCamelCase ( old_path: str, new_path: str ) -> str:
    """Print markdown headers for path components of ``new_path`` that differ
    from ``old_path``, then return ``new_path`` as the new current path.

    NOTE(review): relies on the prefix helper being available under the
    name ``md_prefix`` at module level.
    """
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        # Emit a header only for components that are new at this depth.
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f'''{md_prefix(i )} {new_part.replace("_", " " ).title()}''' )
    return new_path
def __UpperCamelCase ( top_dir: str = "." ) -> None:
    """Print a markdown listing of all code files under ``top_dir``.

    Directory changes emit section headers; each file becomes a markdown
    link whose URL is the space-escaped relative path. The mangled original
    never bound its locals and had ``(unknown)`` spliced into the
    f-strings; both are restored here.

    NOTE(review): relies on siblings named ``good_file_paths`` and
    ``print_path`` at module level.
    """
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f'''{filepath}/{filename}'''.replace(' ', '%20')
        filename = os.path.splitext(filename.replace('_', ' ').title())[0]
        print(f'''{md_prefix(indent)} [{filename}]({url})''')


if __name__ == "__main__":
    # NOTE(review): requires this function to be named ``print_directory_md``.
    print_directory_md('.')
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Public API wiring for the Electra subpackage: a registry of importable
# symbols plus optional-dependency-guarded extensions, mirrored by real
# imports under TYPE_CHECKING and a lazy module at runtime.
# NOTE(review): upstream these registry assignments extend a single
# ``_import_structure`` dict; here each rebinds ``_A``, so earlier entries
# are lost — a mangling artifact, confirm against upstream.
_A : int ={
    '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
    '''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Dict =['''ElectraTokenizerFast''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Optional[Any] =[
        '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ElectraForCausalLM''',
        '''ElectraForMaskedLM''',
        '''ElectraForMultipleChoice''',
        '''ElectraForPreTraining''',
        '''ElectraForQuestionAnswering''',
        '''ElectraForSequenceClassification''',
        '''ElectraForTokenClassification''',
        '''ElectraModel''',
        '''ElectraPreTrainedModel''',
        '''load_tf_weights_in_electra''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Tuple =[
        '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFElectraForMaskedLM''',
        '''TFElectraForMultipleChoice''',
        '''TFElectraForPreTraining''',
        '''TFElectraForQuestionAnswering''',
        '''TFElectraForSequenceClassification''',
        '''TFElectraForTokenClassification''',
        '''TFElectraModel''',
        '''TFElectraPreTrainedModel''',
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Tuple =[
        '''FlaxElectraForCausalLM''',
        '''FlaxElectraForMaskedLM''',
        '''FlaxElectraForMultipleChoice''',
        '''FlaxElectraForPreTraining''',
        '''FlaxElectraForQuestionAnswering''',
        '''FlaxElectraForSequenceClassification''',
        '''FlaxElectraForTokenClassification''',
        '''FlaxElectraModel''',
        '''FlaxElectraPreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): upstream assigns to ``sys.modules[__name__]``.
    _A : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Public API wiring for the Reformer subpackage (same lazy-module pattern).
# NOTE(review): upstream each registry assignment extends a single
# ``_import_structure`` dict keyed by submodule; here each rebinds ``_A`` —
# a mangling artifact, confirm against upstream.
_A : Union[str, Any] ={'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Dict =['''ReformerTokenizer''']
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Union[str, Any] =['''ReformerTokenizerFast''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : str =[
        '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ReformerAttention''',
        '''ReformerForMaskedLM''',
        '''ReformerForQuestionAnswering''',
        '''ReformerForSequenceClassification''',
        '''ReformerLayer''',
        '''ReformerModel''',
        '''ReformerModelWithLMHead''',
        '''ReformerPreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): upstream assigns to ``sys.modules[__name__]``.
    _A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 | 1 |
'''simple docstring'''
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read *file_path* and return its bytes as a string of '0'/'1' characters.

    Exits the process with an error message if the file cannot be opened.
    Fix: the def was named `__UpperCamelCase` while the caller (`compress`)
    uses `read_file_binary`, and all locals were collapsed to one name.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            # Each byte becomes its zero-padded 8-bit binary representation.
            result += f"{dat:08b}"
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Replace *curr_string* in *lexicon* with its two one-bit extensions.

    ``curr_string + "0"`` keeps the old code (*last_match_id*) while
    ``curr_string + "1"`` receives the new code ``bin(index)``.  When *index*
    reaches a power of two the code width grows, so every existing code is
    left-padded with one '0' to keep all codes the same width.
    Fix: the original had four parameters all named `_lowercase`
    (a SyntaxError), wrote dict entries into throwaway locals, and called the
    nonexistent `math.loga`.
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        # Code width increased: left-pad every existing code with one '0'.
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """Compress the bit string *data_bits* with Lempel-Ziv-Welch.

    The lexicon maps bit patterns to their output codes and grows via
    ``add_key_to_lexicon`` each time a known pattern is emitted.
    Fix: the def name did not match the call site (`compress_data`) and the
    bindings for `lexicon`, `result`, `curr_string` and `index` were all
    collapsed into one throwaway name, leaving later reads undefined.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    # Flush any trailing partial pattern by extending it until it matches.
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prefix *compressed* with the source file's byte length.

    The length is encoded as ``bin(size)`` preceded by ``len - 1`` zero bits,
    which lets a decompressor recover the original size unambiguously.
    Fix: duplicate `_lowercase` parameters (SyntaxError) and mangled locals.
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit string *to_write* to *file_path* as bytes.

    The stream is chopped into 8-bit chunks; the last chunk is padded with a
    ``1`` followed by zeros (or a whole ``10000000`` byte is appended when the
    data is already byte-aligned) so the padding can be stripped on read.
    Exits the process with an error message if the file cannot be opened.
    Fix: duplicate `_lowercase` parameters (SyntaxError) and mangled locals;
    also handles an empty bit string instead of raising IndexError.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if not result_byte_array or len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    """Compress *source_path* with LZW and write the result to *destination_path*.

    Fix: duplicate `_lowercase` parameters (SyntaxError); the helper names
    used here (`read_file_binary`, `compress_data`, ...) match the other
    restored definitions in this module.
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 4 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds tiny RoFormer configs and random inputs for the shared Flax tests.

    Fix: three classes in this module were all named `lowerCamelCase__`
    (shadowing each other), `__init__` had duplicate parameter names
    (a SyntaxError), and locals were bound as `_lowercase` while the code
    returned `config`, `input_ids`, etc. (NameError).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) for a tiny model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs to the (config, inputs_dict) pair the mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the shared Flax model-tester suite against the RoFormer classes.

    Fix: the base was an undefined name `A` (FlaxModelTesterMixin is the
    imported mixin), the class collided with two siblings named
    `lowerCamelCase__`, and `setUp` discarded the tester into a local
    instead of storing it on `self.model_tester`.
    """

    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # NOTE(review): assumes the tester class defined above is named
        # FlaxRoFormerModelTester — confirm it carries that name in this module.
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test loading each model class from the small pretrained checkpoint."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration check against a pretrained RoFormer checkpoint.

    Fix: the model/output/expected values were bound to `_lowercase` while
    the assertions read `model`, `output`, etc. (NameError), and the class
    name collided with its siblings.
    """

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        # Reference logits captured from the PyTorch implementation.
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 4 | 1 |
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
# Module logger; `extract_warnings_from_single_artifact` below calls `logger.warning`,
# so this binding must be named `logger` (it was mangled to `_A`).
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings matching *targets* from one downloaded artifact.

    Depending on the module-level `from_gh` flag the artifact is either a
    plain directory (GitHub Action run) or a downloaded ``.zip`` file; in
    both cases only files named ``warnings.txt`` are parsed.
    Fix: duplicate `_lowercase` parameters (SyntaxError) and locals bound to
    `_lowercase` while read as `selected_warnings`/`buffer`/`warning`.
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        # Accumulate indented continuation lines into `buffer`; a non-indented
        # line terminates the current warning, which is kept only if it
        # mentions one of the target warning classes.
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from every artifact under *artifact_dir*.

    Fix: duplicate `_lowercase` parameters (SyntaxError) and the result set
    bound to `_lowercase` while updated as `selected_warnings`.
    """
    selected_warnings = set()
    # When run from a GitHub Action (`from_gh`), artifacts are plain
    # directories; otherwise only the downloaded ``.zip`` files are scanned.
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":

    def list_str(values):
        """argparse type: split a comma-separated option into a list."""
        return values.split(",")

    # Fix: this converter was defined as `__UpperCamelCase` while passed below
    # as `list_str`, and the parse results were bound to `_A` while read as
    # `args`/`artifacts`/`selected_warnings`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()
    # Module-level flag read by the extraction helpers above.
    from_gh = args.from_gh

    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 4 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
# Module logger; the feature extractor's `__call__` uses `logger.warning`,
# so this binding must be named `logger` (it was mangled to `_A`).
logger = logging.get_logger(__name__)
class lowerCamelCase__ ( SequenceFeatureExtractor ):
    """CLAP audio feature extractor: turns raw mono audio into (fused) log-mel
    spectrogram features.

    Inputs longer than ``max_length`` samples are either randomly cropped
    (``truncation="rand_trunc"``) or turned into a 4-channel "fusion" stack of
    a shrunken global mel plus three random chunks; shorter inputs are
    repeated and/or zero-padded according to ``padding``.

    Fix: ``__init__``/``__call__`` had duplicate parameter names
    (a SyntaxError), every ``self.x = ...`` was collapsed into a throwaway
    local so later methods read attributes that were never set, all methods
    shared the name ``__UpperCAmelCase`` while call sites use the real names,
    and the base class was the undefined name ``A`` (SequenceFeatureExtractor
    is the imported base).
    """

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # Number of frequency bins of an rFFT over a window of fft_window_size.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # HTK-style filters feed the "fusion" path ...
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        # ... Slaney-style filters feed the "rand_trunc"/padding path.
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize, dropping the large (and recomputable) mel filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        """Compute the (time-major) log-mel spectrogram of *waveform* in dB."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Stack a bilinear-shrunken copy of *mel* with three random chunks."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        """Truncate/pad *waveform* to *max_length* and extract its mel features.

        Returns ``(input_mel, longer)`` where ``longer`` flags whether the
        audio exceeded *max_length* (used by the "fusion" branch).
        """
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length+hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one or a batch of mono waveforms into a BatchFeature
        with keys ``input_features`` and ``is_longer``.
        """
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
| 4 | 1 |
'''simple docstring'''
class lowerCamelCase__:
    """Fenwick (binary indexed) tree specialised for range-maximum queries.

    ``arr`` holds the raw values; ``tree[i]`` caches the maximum of
    ``arr[get_prev(i) + 1 .. i]``.

    Fix: ``__init__`` discarded size/arrays into locals while the methods read
    ``self.size``/``self.arr``/``self.tree``; all four methods shared the name
    ``__UpperCAmelCase`` (only the last survived) while call sites use
    ``get_prev``/``get_next``; the ``max(...)`` in ``update`` had lost its
    operands and is recomputed here over the node's cached range.
    """

    def __init__(self, size: int) -> None:
        """Create an empty tree over *size* zero-initialised slots."""
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        """Next index whose cached range covers *index* (O(1))."""
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        """Last index before the range cached at *index* (O(1))."""
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set ``arr[index] = value`` and refresh every affected tree node."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # The node caches exactly this one slot.
                self.tree[index] = value
            else:
                # Recompute the cached maximum over [current_left_border, index].
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Maximum of ``arr[left:right]`` (*right* exclusive)."""
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                # The whole cached range at `right` lies inside [left, right].
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
# Run any doctests embedded in this module when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 4 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: int) -> dict:
    """Fetch a single Hacker News item by id via the public Firebase API.

    Fix: the def was named `__UpperCamelCase` while the caller uses
    `get_hackernews_story`.
    """
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Return the first *max_stories* items from the current top-stories list.

    Fix: the def was named `__UpperCamelCase` while the caller uses
    `hackernews_top_stories`, and locals were collapsed to one name.
    """
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the current top stories as a Markdown bullet list.

    Fix: the def was named `__UpperCamelCase` while the ``__main__`` guard
    below calls `hackernews_top_stories_as_markdown`.
    """
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 4 | 1 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
_A : str =logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    """Copy the layers at indices *layers_to_copy* from *src_layers* into *dest_layers*.

    Fix: the selected layers were bound to a throwaway local, so
    ``load_state_dict`` was called with the raw index list (which has no
    ``state_dict``), and the assert compared the same mangled name with itself.
    """
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
# Both tables below were bound to `_A`, while `pick_layers_to_copy` and
# `get_layers_to_supervise` read them as LAYERS_TO_COPY / LAYERS_TO_SUPERVISE
# (NameError). Restored under the names the functions use.
LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    """Return the hardcoded teacher layer indices for this (teacher, student)
    pair, or fall back to the first *n_student* layers with a warning.

    Fix: duplicate `_lowercase` parameters (SyntaxError) and a mangled local.
    """
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    """Pick which teacher layers supervise each student layer.

    Raises ValueError when the student is deeper than the teacher; equal
    depths map one-to-one, a single-layer student learns from the teacher's
    last layer, and other combinations come from LAYERS_TO_SUPERVISE.
    Fix: duplicate `_lowercase` parameters (SyntaxError).
    """
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student by copying alternating layers from *teacher*, save it to
    *save_path*, and return ``(student, e_layers_to_copy, d_layers_to_copy)``.

    e/d are the desired student encoder/decoder depths (None keeps the
    teacher's). Fix: the original had seven parameters all named `_lowercase`
    (a SyntaxError) and every local (`teacher`, `init_kwargs`, `student`, ...)
    mangled so subsequent reads hit undefined names.
    """
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeqaSeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeqaSeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # NOTE(review): the mangled source discarded this dict into a local;
    # upstream records it on the config — confirm the attribute name.
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
    # Expose create_student_by_copying_alternating_layers as a CLI (python-fire).
    fire.Fire(create_student_by_copying_alternating_layers)
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both bindings below share the name `_A`, so the logger is
# immediately overwritten by the (empty) archive map — confirm the intended
# names (conventionally `logger` and a *_PRETRAINED_CONFIG_ARCHIVE_MAP).
_A : Dict =logging.get_logger(__name__)

_A : Dict ={
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__ ( PretrainedConfig ):
    """Configuration for MEGATRON-BERT models (BERT architecture at Megatron
    sizes: 1024 hidden, 24 layers, 16 heads by default).

    Fix: ``__init__`` had duplicate parameter names (a SyntaxError), every
    ``self.x = ...`` was discarded into a local, the base class was the
    undefined name ``A`` (PretrainedConfig is the imported base), and the
    ``model_type`` class attribute was mangled to ``A_``.
    """

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 4 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    """Map-style dataset that applies a preprocessing callable to each item.

    Fix: the base was the undefined name ``A`` (``Dataset`` is the imported
    base), ``__init__`` discarded its arguments into locals while the other
    methods read ``self.dataset``/``self.process``/``self.params``, and the
    class name collided with the iterator class below.
    """

    def __init__(self, dataset, process, params):
        self.dataset = dataset  # underlying indexable dataset
        self.process = process  # callable applied to each raw item
        self.params = params  # extra keyword arguments for `process`

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class lowerCamelCase__(IterableDataset):
    """Iterator that runs ``infer`` on each item from ``loader``, optionally
    unrolling batched model outputs back into single (batch_size=1) items.

    NOTE(review): the obfuscated original inherited from an undefined name ``A``
    (``IterableDataset``, imported above, is the evident base).  Its two methods
    both carried the same name and the code called a nonexistent
    ``self.loader_batch_item``; the recovered names are ``loader_batch_item``
    and ``__next__``.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping for unrolling a batched model output.
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the next element of the currently unrolled batch, shaped as batch_size=1."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class (batch_size=1).
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch: just return the next sub-item.
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # Could be the last batch: don't unroll more elements than we have.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class lowerCamelCase__(A):
    """Iterator flattening the sub-iterators produced by ``infer`` into one stream.

    NOTE(review): base class ``A`` is undefined in this file; in the upstream
    pipeline utilities this class extends the ``PipelineIterator`` defined
    above — confirm before changing the base.  The obfuscated method name was
    restored to ``__next__`` and ``self.subiterator`` is now actually stored.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # "Subiterator None" means we haven't started unrolling the first item yet.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class lowerCamelCase__(A):
    """Iterator that regroups flattened pipeline items until an ``is_last`` marker.

    NOTE(review): base class ``A`` is undefined in this file; upstream this
    class extends the ``PipelineIterator`` defined above (which provides
    ``loader_batch_item`` and the ``_loader_batch_*`` bookkeeping) — confirm.
    The obfuscated method name was restored to ``__next__`` and the throwaway
    ``_lowercase`` locals were recovered from context.
    """

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Accumulate items until one carries `is_last`, then return the group.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop('is_last')
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop('is_last')
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop('is_last')
                accumulator.append(item)
        return accumulator
class lowerCamelCase__(Dataset):
    """Dataset view returning a single ``key`` field of each underlying item.

    NOTE(review): the undefined base ``A`` was replaced with ``Dataset``,
    matching the ``Dataset`` type annotation of the obfuscated original.
    """

    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self) -> int:
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class lowerCamelCase__(Dataset):
    """Dataset view pairing two fields of each item as ``text`` / ``text_pair``.

    NOTE(review): the obfuscated original collapsed both key parameters into a
    single duplicate name; two distinct keys are recovered from the twin
    assignments and the paired return value below.  The undefined base ``A``
    was replaced with ``Dataset``, matching the original type annotations.
    """

    def __init__(self, dataset: Dataset, keya: str, keyb: str):
        self.dataset = dataset
        self.keya = keya
        self.keyb = keyb

    def __len__(self) -> int:
        return len(self.dataset)

    def __getitem__(self, i) -> dict:
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
| 4 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """Convert a fine-pruned model checkpoint into a standard dense checkpoint
    by materializing the pruning masks into the weights.

    The function name ``main`` is recovered from the ``main(args)`` call in the
    ``__main__`` guard; all locals below were lost in ``_lowercase`` throwaways.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip('/')
    target_model_path = args.target_model_path

    print(f'''Load fine-pruned model from {model_name_or_path}''')
    model = torch.load(os.path.join(model_name_or_path, 'pytorch_model.bin'))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # Embedding/normalization layers are never pruned: copy verbatim.
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''')
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''')
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''')
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''')
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''')
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                # NOTE(review): third argument is the sigmoid flag in the
                # upstream movement-pruning script — confirm.
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''')
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                # Hard-concrete stretch interval used during L0 training.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''')
            else:
                raise ValueError('Unknown pruning method')

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f'''bertarized_{os.path.basename(model_name_or_path)}''')

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f'''\nCreated folder {target_model_path}''')

    torch.save(pruned_model, os.path.join(target_model_path, 'pytorch_model.bin'))
    print('\nPruned model saved! See you later!')
if __name__ == "__main__":
    # NOTE(review): the obfuscated original bound the parser and the parsed
    # args to `_A`, so the `parser.add_argument` / `main(args)` lines below
    # referenced undefined names; the bindings are restored here.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--pruning_method',
        choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
        type=str,
        required=True,
        help=(
            'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
            ' sigmoied_threshold = Soft movement pruning)'
        ),
    )
    parser.add_argument(
        '--threshold',
        type=float,
        required=False,
        help=(
            'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
            'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
            'Not needed for `l0`'
        ),
    )
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        required=True,
        help='Folder containing the model that was previously fine-pruned',
    )
    parser.add_argument(
        '--target_model_path',
        default=None,
        type=str,
        required=False,
        help='Folder containing the model that was previously fine-pruned',
    )
    args = parser.parse_args()
    main(args)
| 4 | 1 |
'''simple docstring'''
class SubArray:
    """Maximum contiguous sub-array sum over a comma-separated list of integers.

    The class name ``SubArray`` and the script variable names are recovered
    from the call sites in the ``__main__`` block of the obfuscated original.
    """

    def __init__(self, arr: str) -> None:
        # Keep the raw string tokens; they are converted with int() on use.
        self.array = arr.split(',')

    def solve_sub_array(self) -> int:
        """Return the maximum sum over all contiguous sub-arrays (Kadane-style DP)."""
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            # Best sum of a sub-array ending exactly at i.
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            # Best sum seen anywhere up to i.
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(('the results is:', re))
| 4 |
'''simple docstring'''
# The 64-character Base64 alphabet (RFC 4648).  The obfuscated original bound
# this to `_A` while both functions below read `B64_CHARSET`.
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'


def base64_encode(data) -> bytes:
    """Encode ``data`` to Base64 (pure-Python reimplementation of ``b64encode``).

    The function and local names are recovered from the obfuscated original,
    whose two functions shadowed each other under one name.
    """
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg)

    binary_stream = ''.join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b'=' * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b''

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6) ).encode()
        + padding
    )


def base64_decode(encoded_data) -> bytes:
    """Decode Base64 ``encoded_data`` (``str`` or ASCII ``bytes``) back to bytes."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            'argument should be a bytes-like object or ASCII string, '
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode('utf-8')
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters')

    padding = encoded_data.count('=')

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = ''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 4 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): in the obfuscated original both assignments bound the same name
# `_A`, so the archive map clobbered the logger; distinct names restore both.
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
    'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
    'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
    'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
    'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
    'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
    'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
    'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
    'bert-large-uncased-whole-word-masking': (
        'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
    ),
    'bert-large-cased-whole-word-masking': (
        'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
    ),
    'bert-large-uncased-whole-word-masking-finetuned-squad': (
        'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
    ),
    'bert-large-cased-whole-word-masking-finetuned-squad': (
        'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
    ),
    'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
    'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
    'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
    'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
    'cl-tohoku/bert-base-japanese-whole-word-masking': (
        'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
    ),
    'cl-tohoku/bert-base-japanese-char': (
        'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
    ),
    'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
        'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
    ),
    'TurkuNLP/bert-base-finnish-cased-v1': (
        'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
    ),
    'TurkuNLP/bert-base-finnish-uncased-v1': (
        'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
    ),
    'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
    # See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__(A):
    """BERT-style model configuration (evident from the ``"bert"`` model type
    and defaults).  Parameter names are recovered from the right-hand sides of
    the obfuscated ``_lowercase`` assignments, whose signature also reused one
    duplicate parameter name (a SyntaxError).
    """

    A_ = """bert"""

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowerCamelCase__(A):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def __UpperCAmelCase(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra `choice` axis between batch and
        # sequence.  NOTE(review): the obfuscated original dropped this dict
        # into a `_lowercase` local and then read an undefined `dynamic_axis`.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
| 4 |
'''simple docstring'''
def is_palindrome(n) -> bool:
    """Return True if the decimal representation of ``n`` is a palindrome."""
    return str(n) == str(n)[::-1]


def sum_reverse(n) -> int:
    """Return ``n`` plus its digit-reversed counterpart (one Lychrel step)."""
    return int(n) + int(str(n)[::-1])


def solution(limit=1_0000) -> int:
    """Count Lychrel candidates below ``limit`` (Project Euler 55).

    A number is considered Lychrel here if 50 reverse-and-add iterations never
    produce a palindrome.  The three function names are recovered from the
    internal call sites of the obfuscated original, whose three definitions
    all shadowed one name.
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        n = num
        while iterations < 50:
            n = sum_reverse(n)
            iterations += 1
            if is_palindrome(n):
                break
        else:
            # Loop exhausted without finding a palindrome: Lychrel candidate.
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 4 | 1 |
'''simple docstring'''
from typing import Any
class Node:
    """Singly-linked-list node holding ``data`` and a ``next`` pointer.

    The name ``Node`` is recovered from the ``Node(...)`` call sites and the
    ``Node({data})`` repr of the obfuscated original.
    """

    def __init__(self, data) -> None:
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return F'''Node({self.data})'''
class LinkedList:
    """Singly linked list with index access, insertion, deletion and reversal.

    Class and method names are recovered from the call sites in the test
    functions below (``LinkedList()``, ``insert_nth``, ...); the obfuscated
    original gave every method the same shadowing name and lost every
    assignment target in a ``_lowercase`` local.
    """

    def __init__(self) -> None:
        self.head = None

    def __iter__(self):
        # Yield the data of each node from head to tail.
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data) -> None:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data) -> None:
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def __UpperCamelCase() -> None:
    """Self-test of the linked list with integer data.

    NOTE(review): the obfuscated original dropped the list into a
    ``_lowercase`` local (the asserts read ``linked_list``) and replaced the
    ``linked_list[i] = -i`` item assignment with a dead local store; both are
    restored here.  The original function name is kept.
    """
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def __UpperCamelCase() -> None:
    """Self-test of the linked list with heterogeneous data.

    NOTE(review): the obfuscated original lost ``test_input``, ``linked_list``
    and the three ``result`` locals in ``_lowercase`` throwaways; they are
    restored from the asserts, which still read the real names.
    """
    test_input = [
        -9,
        100,
        Node(7734_5112),
        'dlrow olleH',
        7,
        5555,
        0,
        -1_9_2.5_5_5_5_5,
        'Hello, world!',
        7_7.9,
        Node(10),
        None,
        None,
        1_2.2_0,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 1_2.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """Interactive demo of the linked list.

    The function name ``main`` is recovered from the ``main()`` call in the
    ``__main__`` guard; the ``linked_list`` local and the ``linked_list[1] =``
    item assignment were lost in ``_lowercase`` throwaways and are restored.
    """
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f'''Element at Position 1: {linked_list[1]}''')
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f'''length of linked_list is : {len(linked_list)}''')


if __name__ == "__main__":
    main()
| 4 |
'''simple docstring'''
import argparse
from collections import defaultdict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int:
_lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowercase, 'r' ) as f:
_lowercase : Optional[int] = f.readlines()
_lowercase : Dict = f'''class {class_name}('''
_lowercase : List[Any] = f'''{4 * " "}def {test_name}('''
_lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}'''
_lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}'''
_lowercase : Dict = False
_lowercase : str = False
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = 0
_lowercase : Tuple = 0
_lowercase : Optional[int] = []
for line in lines:
if line.startswith(_lowercase ):
_lowercase : int = True
elif in_class and line.startswith(_lowercase ):
_lowercase : List[Any] = True
elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
_lowercase : str = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : List[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * " "}{correct_line}''' )
_lowercase : Any = False
else:
new_lines.append(_lowercase )
with open(_lowercase, 'w' ) as f:
for line in new_lines:
f.write(_lowercase )
def __UpperCamelCase ( _lowercase, _lowercase=None ) -> Optional[Any]:
if fail is not None:
with open(_lowercase, 'r' ) as f:
_lowercase : Any = {l.strip() for l in f.readlines()}
else:
_lowercase : str = None
with open(_lowercase, 'r' ) as f:
_lowercase : str = f.readlines()
_lowercase : Union[str, Any] = defaultdict(_lowercase )
for line in correct_lines:
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowercase, _lowercase, _lowercase, _lowercase, _lowercase )
if __name__ == "__main__":
    # NOTE(review): the obfuscated original bound the parser and the parsed
    # args to `_A`, so `parser.add_argument` / `args` below referenced
    # undefined names; the bindings are restored here.
    parser = argparse.ArgumentParser()
    parser.add_argument('--correct_filename', help='filename of tests with expected result')
    parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
| 4 | 1 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def __UpperCamelCase ( _lowercase ) -> np.ndarray:
    """Element-wise logistic sigmoid ``1 / (1 + e^-x)`` of a NumPy array.

    The original body read an undefined name ``_outputs``; it now uses the
    actual parameter.  (The previous ``-> int`` annotation was also wrong —
    the result is an array.)
    """
    return 1.0 / (1.0 + np.exp(-_lowercase ))
def __UpperCamelCase ( _lowercase ) -> np.ndarray:
    """Numerically stable softmax over the last axis of a NumPy array.

    The original body read the undefined names ``_outputs`` and ``maxes`` and
    passed the input array as ``keepdims``; it now implements the standard
    max-shifted softmax.
    """
    # Subtract the per-row maximum before exponentiating to avoid overflow.
    maxes = np.max(_lowercase , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_lowercase - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
class lowerCamelCase__ ( A ):
    """Post-processing functions a text-classification pipeline can apply to
    raw model logits (presumably an ``ExplicitEnum`` — the base class ``A`` is
    not defined in this file; confirm upstream).
    """

    # The original block assigned all three strings to the same name `A_`, so
    # only the last value survived.  Later code in this file accesses
    # `.SIGMOID`, `.SOFTMAX` and `.NONE` (and looks members up by upper-cased
    # string), so the members are restored under those names.
    SIGMOID = """sigmoid"""
    SOFTMAX = """softmax"""
    NONE = """none"""
@add_end_docstrings(
    A , R"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `\"default\"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
            - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
            has several labels, will apply the softmax function on the output.
            - `\"sigmoid\"`: Applies the sigmoid function on the output.
            - `\"softmax\"`: Applies the softmax function on the output.
            - `\"none\"`: Does not apply any function on the output.
    """ , )
class lowerCamelCase__ ( A ):
    """Text-classification pipeline: tokenize text, run a sequence-classification
    model and post-process logits into (label, score) dicts.

    NOTE(review): this block carries heavy obfuscation damage — assignments all
    target the throwaway name `_lowercase`, while later statements read the
    intended names (`top_k`, `return_all_scores`, `function_to_apply`, `inputs`,
    `outputs`, `scores`, `dict_scores`, `_legacy`, `x`, ...), so the class cannot
    run as written.  Comments below describe the *apparent* intent only; confirm
    against the upstream `TextClassificationPipeline` before relying on them.
    """

    # NOTE(review): both class attributes are bound to the same name `A_`, so
    # only the second survives — presumably `return_all_scores = False` and
    # `function_to_apply = ClassificationFunction.NONE` originally.
    A_ = False
    A_ = ClassificationFunction.NONE
    def __init__( self : List[Any] , **UpperCamelCase_ : Tuple ) -> int:
        """Forward kwargs to the base pipeline and restrict to sequence-classification models."""
        super().__init__(**UpperCamelCase_ )
        # Accept only models registered for sequence classification in the
        # framework actually in use (TF vs PyTorch).
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : str=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Any="" , **UpperCamelCase_ : Optional[Any] ) -> Optional[int]:
        """Split call-time kwargs into (preprocess, forward, postprocess) parameter dicts.

        Appears to translate the deprecated `return_all_scores` flag into the
        newer `top_k` convention and to normalize `function_to_apply` into a
        `ClassificationFunction` member.
        """
        _lowercase : Optional[int] = tokenizer_kwargs
        _lowercase : Optional[Any] = {}
        # Fall back to the model config's `return_all_scores` when not given.
        if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None:
            _lowercase : Union[str, Any] = self.model.config.return_all_scores
        if isinstance(UpperCamelCase_ , UpperCamelCase_ ) or top_k is None:
            _lowercase : List[Any] = top_k
            _lowercase : Tuple = False
        elif return_all_scores is not None:
            # Legacy flag: map True -> top_k=None (all scores), False -> top_k=1.
            warnings.warn(
                '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
                ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , UpperCamelCase_ , )
            if return_all_scores:
                _lowercase : Optional[int] = None
            else:
                _lowercase : int = 1
        # A string `function_to_apply` is resolved to an enum member by name.
        if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
            _lowercase : Dict = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            _lowercase : Optional[Any] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__( self : Tuple , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Union[str, Any] ) -> str:
        """Run the pipeline; in legacy mode a single input still yields a list."""
        _lowercase : Optional[int] = super().__call__(*UpperCamelCase_ , **UpperCamelCase_ )
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _lowercase : Union[str, Any] = 'top_k' not in kwargs
        if isinstance(args[0] , UpperCamelCase_ ) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def __UpperCAmelCase ( self : int , UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Union[str, Any] ) -> Dict[str, GenericTensor]:
        """Tokenize the raw input (str, dict of tokenizer kwargs, or legacy pair list) into framework tensors."""
        _lowercase : Optional[Any] = self.framework
        if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
            return self.tokenizer(**UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
        elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) == 1 and isinstance(inputs[0] , UpperCamelCase_ ) and len(inputs[0] ) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
        elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' )
        return self.tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
    def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : List[Any] ) -> Tuple:
        """Forward the tokenized tensors through the model."""
        return self.model(**UpperCamelCase_ )
    def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=1 , UpperCamelCase_ : List[Any]=True ) -> Union[str, Any]:
        """Turn model logits into label/score dicts.

        Appears to pick sigmoid for multi-label/single-logit models, softmax for
        single-label multi-class models, then (in legacy top_k==1 mode) return a
        single dict, otherwise a sorted, truncated list of dicts.
        """
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                _lowercase : Any = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                _lowercase : str = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None:
                _lowercase : List[Any] = self.model.config.function_to_apply
            else:
                _lowercase : List[str] = ClassificationFunction.NONE
        # First (and only) item of the batch.
        _lowercase : Optional[Any] = model_outputs['logits'][0]
        _lowercase : int = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            _lowercase : Union[str, Any] = sigmoid(UpperCamelCase_ )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            _lowercase : List[Any] = softmax(UpperCamelCase_ )
        elif function_to_apply == ClassificationFunction.NONE:
            _lowercase : List[str] = outputs
        else:
            raise ValueError(F'''Unrecognized `function_to_apply` argument: {function_to_apply}''' )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
        _lowercase : Union[str, Any] = [
            {'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(UpperCamelCase_ )
        ]
        if not _legacy:
            # NOTE(review): the lambda parameter is `UpperCamelCase_` but the body
            # reads `x` — presumably `lambda x: x["score"]` originally; confirm.
            dict_scores.sort(key=lambda UpperCamelCase_ : x["score"] , reverse=UpperCamelCase_ )
            if top_k is not None:
                _lowercase : Any = dict_scores[:top_k]
        return dict_scores
# ---- dataset artifact: boundary between concatenated source samples (was "| 4 |") ----
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(A )
class lowerCamelCase__ ( A ):
    """Zero-shot image classification pipeline: score an image against a set of
    free-form candidate labels using a CLIP-style dual encoder.

    NOTE(review): this block is obfuscation-damaged — assignments target the
    throwaway name `_lowercase` while later statements read the intended names
    (`candidate_labels`, `text_inputs`, `outputs`, `scores`, `probs`, `x`), so
    it cannot run as written.  Comments describe the apparent intent only.
    """
    def __init__( self : Tuple , **UpperCamelCase_ : List[str] ) -> int:
        """Forward kwargs to the base pipeline; require the vision backend and a
        zero-shot image-classification model for the active framework."""
        super().__init__(**UpperCamelCase_ )
        requires_backends(self , 'vision' )
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self : int , UpperCamelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCamelCase_ : Tuple ) -> List[Any]:
        """Classify one image (or a list of images) against `candidate_labels`."""
        return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
    def __UpperCAmelCase ( self : List[Any] , **UpperCamelCase_ : str ) -> List[str]:
        """Route `candidate_labels` / `hypothesis_template` kwargs to preprocessing."""
        _lowercase : Optional[int] = {}
        if "candidate_labels" in kwargs:
            _lowercase : Union[str, Any] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            _lowercase : int = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str="This is a photo of {}." ) -> Union[str, Any]:
        """Load the image, build one hypothesis prompt per label and tokenize them."""
        _lowercase : Dict = load_image(UpperCamelCase_ )
        _lowercase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
        _lowercase : Optional[Any] = candidate_labels
        # NOTE(review): the template is formatted with `UpperCamelCase_` (the
        # image argument) instead of the loop variable `x` — presumably
        # `hypothesis_template.format(x)` originally; confirm upstream.
        _lowercase : List[Any] = [hypothesis_template.format(UpperCamelCase_ ) for x in candidate_labels]
        _lowercase : Union[str, Any] = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework , padding=UpperCamelCase_ )
        _lowercase : Any = [text_inputs]
        return inputs
    def __UpperCAmelCase ( self : str , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
        """Run the dual encoder and keep the per-image logits plus the labels."""
        _lowercase : Optional[Any] = model_inputs.pop('candidate_labels' )
        _lowercase : List[str] = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0] , UpperCamelCase_ ):
            _lowercase : Optional[int] = text_inputs[0]
        else:
            # Batching case.
            _lowercase : List[str] = text_inputs[0][0]
        # NOTE(review): presumably `self.model(**text_inputs, **model_inputs)`
        # originally — the same name is double-splatted here; confirm.
        _lowercase : Optional[Any] = self.model(**UpperCamelCase_ , **UpperCamelCase_ )
        _lowercase : Optional[Any] = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs
    def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> List[str]:
        """Softmax the image/text logits and emit (score, label) dicts sorted by score."""
        _lowercase : Optional[int] = model_outputs.pop('candidate_labels' )
        _lowercase : Optional[int] = model_outputs['logits'][0]
        if self.framework == "pt":
            _lowercase : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 )
            _lowercase : Tuple = probs.tolist()
            if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
                _lowercase : List[Any] = [scores]
        elif self.framework == "tf":
            _lowercase : Optional[int] = stable_softmax(UpperCamelCase_ , axis=-1 )
            _lowercase : List[Any] = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        _lowercase : List[Any] = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(UpperCamelCase_ , UpperCamelCase_ ) , key=lambda UpperCamelCase_ : -x[0] )
        ]
        return result
# ---- dataset artifact: boundary between concatenated source samples (was "| 4 | 1 |") ----
'''simple docstring'''
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def __UpperCamelCase ( k_size , sigma ):
    """Return a ``k_size x k_size`` 2-D Gaussian kernel with spread ``sigma``.

    The original signature used the same name for both parameters (a
    SyntaxError) and the body read undefined names; the standard mgrid-based
    construction is restored.  Note the ``1 / (2*pi*sigma)`` scale (not
    ``sigma**2``) matches the source algorithm this file derives from.
    """
    center = k_size // 2
    # Coordinate grids centered on the kernel middle, e.g. -1..1 for k_size=3.
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x ) + square(y )) / (2 * square(sigma )) )
    return g
def __UpperCamelCase ( image , k_size , sigma ):
    """'Valid'-mode Gaussian blur via im2col plus a single matrix-vector product.

    Fixes over the original block: the three parameters shared one name (a
    SyntaxError), the im2col row was dropped instead of stored at
    ``image_array[row, :]``, and ``astype`` received an arbitrary local.
    The third parameter is named ``sigma`` because callers pass it by keyword.
    """
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col: turn each k_size*k_size window into one row of a 2-D array.
    image_array = zeros((dst_height * dst_width, k_size * k_size) )
    row = 0
    for i, j in product(range(dst_height ), range(dst_width ) ):
        window = ravel(image[i : i + k_size, j : j + k_size] )
        image_array[row, :] = window
        row += 1
    # Turn the kernel into shape (k*k,).
    # NOTE(review): `gen_gaussian_kernel` is not defined under that name in
    # this file — the kernel builder above is named `__UpperCamelCase`.
    gaussian_kernel = gen_gaussian_kernel(k_size , sigma )
    filter_array = ravel(gaussian_kernel )
    # `uinta` is this file's alias for numpy's uint8 (see the import line) —
    # TODO confirm; reshape the flat products back into the output image.
    dst = dot(image_array , filter_array ).reshape(dst_height , dst_width ).astype(uinta )
    return dst
if __name__ == "__main__":
    # read original image
    img = imread(r'''../image_data/lena.jpg''')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask sizes.  The original bound both
    # results to the same name and then displayed an undefined `gaussianaxa`.
    # NOTE(review): `gaussian_filter` is not defined under that name in this
    # file — the filter above is named `__UpperCamelCase`; confirm the binding.
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow('''gaussian filter with 3x3 mask''', gaussian3x3)
    imshow('''gaussian filter with 5x5 mask''', gaussian5x5)
    waitKey()
# ---- dataset artifact: boundary between concatenated source samples (was "| 4 |") ----
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __UpperCamelCase ( _lowercase ) -> None:
    """Print Shannon-entropy estimates of the given text.

    Prints three values, each rounded to whole bits and formatted to one
    decimal place: the single-character entropy, the character-pair entropy,
    and their difference.

    Fixes over the original block: the entropy term used ``math.loga`` on the
    text instead of ``math.log2`` on the probability, the nested pair loops
    reused one variable (so only doubled characters were ever probed), and the
    ``analyze_text`` unpack collapsed onto a single name.
    """
    text = _lowercase
    # NOTE(review): `analyze_text` is not defined under that name in this file
    # (the helper below is named `__UpperCamelCase`); confirm the binding.
    single_char_strings, two_char_strings = analyze_text(text )
    my_alphas = list(' ' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(f'''{round(-1 * my_sec_sum ):.1f}''' )
    # print the difference between them
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
    """Count single characters and overlapping character pairs in the text.

    Returns ``(single_char_counts, two_char_counts)`` as Counters.  The last
    character and a leading ``" " + first_char`` pair are counted explicitly,
    matching the original algorithm.  Raises IndexError on empty input
    (pre-existing behavior, preserved).

    Fixes over the original block: both Counters were discarded into one
    throwaway name and the body read an undefined ``text``.
    """
    text = _lowercase
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> None:
    """Entry point: run this module's doctests.

    The previous ``-> List[Any]`` annotation was incorrect — the function
    never returns a value.
    """
    import doctest

    doctest.testmod()
    # Example text for a manual run of the entropy calculation, e.g.:
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. "
    #     "Remark easily garret nor nay. Civil those mrs enjoy shy fat merry. ..."
    # )
    # calculate_prob(text)
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this file — the
    # doctest runner above is named `__UpperCamelCase`; confirm the intended
    # entry point before running.
    main()
# ---- dataset artifact: boundary between concatenated source samples (was "| 4 | 1 |") ----
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table for the Reformer model package: heavy submodules are only
# imported on first attribute access, and optional-dependency guards decide
# which symbols are exported at all.
# NOTE(review): every assignment below rebinds the single name `_A`, so only
# the last value survives — presumably each was originally a distinct entry in
# an `_import_structure` dict; confirm against the upstream module.
_A : Union[str, Any] ={'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
# Slow tokenizer is only available when sentencepiece is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Dict =['''ReformerTokenizer''']
# Fast tokenizer requires the `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Union[str, Any] =['''ReformerTokenizerFast''']
# Modeling classes require PyTorch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : str =[
        '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ReformerAttention''',
        '''ReformerForMaskedLM''',
        '''ReformerForQuestionAnswering''',
        '''ReformerForSequenceClassification''',
        '''ReformerLayer''',
        '''ReformerModel''',
        '''ReformerModelWithLMHead''',
        '''ReformerPreTrainedModel''',
    ]
# Under static type checking, perform the real (eager) imports so analyzers
# see the actual symbols; the same availability guards apply.
if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    # NOTE(review): `_import_structure` is not defined anywhere in this file —
    # the lazy module presumably expects the table that the `_A` assignments
    # above were meant to build; confirm upstream.
    _A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
# ---- dataset artifact: boundary between concatenated source samples (was "| 4 |") ----
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow integration check for the Flax XLM-RoBERTa base checkpoint."""

    @slow
    def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
        """Run one forward pass and compare a slice of the last hidden states
        against reference values.

        Fixes over the original block: every intermediate was bound to the
        throwaway name `_lowercase` and the final `allclose` compared against
        the undefined name `UpperCamelCase_`; the locals are restored so the
        assertions actually exercise the model output.
        """
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        sample_text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(sample_text )] )
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
        output = model(input_ids )['last_hidden_state']
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
# ---- dataset artifact: boundary between concatenated source samples (was "| 4 | 1 |") ----
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_A : Optional[int] =logging.get_logger(__name__)
_A : Optional[int] ={
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowerCamelCase__ ( A ):
    """Configuration for Marian machine-translation models.

    Defaults mirror the Helsinki-NLP/opus-mt-en-de checkpoint.  The base
    class `A` is presumably `PretrainedConfig` — it is not defined in this
    file; confirm upstream.

    Fixes over the original block: the `__init__` parameters all shared one
    name (a SyntaxError) and every `self.` assignment was collapsed onto a
    throwaway local; both are restored from the attribute names the body
    already referenced.
    """

    # Restored as three distinct class attributes: the original assigned all
    # three values to the same name `A_`, so two of them were lost.
    model_type = """marian"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__( self , vocab_size=5_8101 , decoder_vocab_size=None , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=5_8100 , scale_embedding=False , pad_token_id=5_8100 , eos_token_id=0 , forced_eos_token_id=0 , share_encoder_decoder_embeddings=True , **kwargs ) -> None:
        """Store the transformer hyper-parameters; token ids and the
        encoder/decoder flag are forwarded to the base config."""
        self.vocab_size = vocab_size
        # Decoder may use its own vocabulary; fall back to the shared one.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class lowerCamelCase__ ( A ):
    """ONNX export configuration for Marian (copied from BART's OnnxConfig).

    Describes input/output tensor axes and builds dummy inputs for tracing the
    model in `default`, `seq2seq-lm` and `causal-lm` tasks, including
    past-key-value caches.

    NOTE(review): this block carries obfuscation damage — assignments target
    the throwaway name `_lowercase` while later statements read the intended
    names (`common_inputs`, `decoder_shape`, ...), and several method
    signatures reuse one parameter name; comments below describe the apparent
    intent only.  Confirm against the upstream Bart/Marian OnnxConfig.
    """
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def __UpperCAmelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
        """Map each ONNX input name to its dynamic-axis names, per task."""
        if self.task in ["default", "seq2seq-lm"]:
            _lowercase : List[str] = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                # With a cache, the decoder consumes one new token per step.
                _lowercase : Tuple = {0: 'batch'}
                _lowercase : List[str] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                _lowercase : Tuple = {0: 'batch', 1: 'decoder_sequence'}
                _lowercase : int = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(UpperCamelCase_ , direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            _lowercase : Union[str, Any] = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                _lowercase , _lowercase : List[Any] = self.num_layers
                for i in range(UpperCamelCase_ ):
                    _lowercase : str = {0: 'batch', 2: 'past_sequence + sequence'}
                    _lowercase : Dict = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            _lowercase : Optional[Any] = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def __UpperCAmelCase ( self : str ) -> Mapping[str, Mapping[int, str]]:
        """Output axes: base-class outputs, extended with cache `present.*` axes."""
        if self.task in ["default", "seq2seq-lm"]:
            _lowercase : List[str] = super().outputs
        else:
            _lowercase : int = super(UpperCamelCase_ , self ).outputs
            if self.use_past:
                _lowercase , _lowercase : str = self.num_layers
                for i in range(UpperCamelCase_ ):
                    _lowercase : Any = {0: 'batch', 2: 'past_sequence + sequence'}
                    _lowercase : Any = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs (and zero past-key-value tensors
        when `use_past`) for the default / seq2seq-lm export."""
        _lowercase : str = self._generate_dummy_inputs_for_encoder_and_decoder(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        # Generate decoder inputs
        _lowercase : Any = seq_length if not self.use_past else 1
        _lowercase : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        # Prefix decoder tensor names with "decoder_" and merge both dicts.
        _lowercase : Optional[int] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
        _lowercase : str = dict(**UpperCamelCase_ , **UpperCamelCase_ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            _lowercase , _lowercase : Optional[Any] = common_inputs['input_ids'].shape
            _lowercase : List[str] = common_inputs['decoder_input_ids'].shape[1]
            _lowercase , _lowercase : Tuple = self.num_attention_heads
            # Per-layer cache tensor shapes for encoder and decoder attention.
            _lowercase : List[str] = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            _lowercase : List[Any] = decoder_seq_length + 3
            _lowercase : Optional[Any] = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            # Extend the decoder mask to cover the padded past length.
            _lowercase : Any = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase_ , UpperCamelCase_ )] , dim=1 )
            _lowercase : Optional[Any] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            _lowercase , _lowercase : Dict = self.num_layers
            _lowercase : Dict = min(UpperCamelCase_ , UpperCamelCase_ )
            _lowercase : int = max(UpperCamelCase_ , UpperCamelCase_ ) - min_num_layers
            _lowercase : Union[str, Any] = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            # Shared layers get 4 zero tensors (self + cross attention caches).
            for _ in range(UpperCamelCase_ ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(UpperCamelCase_ ),
                        torch.zeros(UpperCamelCase_ ),
                        torch.zeros(UpperCamelCase_ ),
                        torch.zeros(UpperCamelCase_ ),
                    ) )
            # TODO: test this.
            _lowercase : Dict = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(UpperCamelCase_ , UpperCamelCase_ ):
                common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) )
        return common_inputs
    def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Build dummy inputs (and zero caches) for the causal-lm export."""
        _lowercase : int = self._generate_dummy_inputs_for_encoder_and_decoder(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            _lowercase , _lowercase : Any = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            _lowercase : Any = seqlen + 2
            _lowercase , _lowercase : Union[str, Any] = self.num_layers
            _lowercase , _lowercase : List[Any] = self.num_attention_heads
            _lowercase : Dict = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            # Extend the attention mask over the past positions.
            _lowercase : Union[str, Any] = common_inputs['attention_mask'].dtype
            _lowercase : Union[str, Any] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_ )] , dim=1 )
            _lowercase : Optional[Any] = [
                (torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) for _ in range(UpperCamelCase_ )
            ]
        return common_inputs
    def __UpperCAmelCase ( self : int , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Tokenize a dummy batch of repeated unk tokens with effective
        (batch, sequence) sizes resolved from the dynamic-axis placeholders."""
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        _lowercase : Optional[Any] = compute_effective_axis_dimension(
            UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        _lowercase : int = tokenizer.num_special_tokens_to_add(UpperCamelCase_ )
        _lowercase : int = compute_effective_axis_dimension(
            UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase_ )
        # Generate dummy inputs according to compute batch and sequence
        _lowercase : Optional[Any] = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        _lowercase : List[str] = dict(tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ ) )
        return common_inputs
    def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation based on the export task."""
        if self.task in ["default", "seq2seq-lm"]:
            _lowercase : Optional[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
        else:
            _lowercase : Dict = self._generate_dummy_inputs_for_causal_lm(
                UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
        return common_inputs
    def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : str ) -> Tuple:
        """Flatten nested past-key-value tuples into the output mapping, using
        the seq2seq base behavior for encoder-decoder tasks."""
        if self.task in ["default", "seq2seq-lm"]:
            _lowercase : Tuple = super()._flatten_past_key_values_(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        else:
            _lowercase : int = super(UpperCamelCase_ , self )._flatten_past_key_values_(
                UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
    @property
    def __UpperCAmelCase ( self : List[Any] ) -> float:
        """Absolute tolerance used when validating exported-model outputs."""
        return 1E-4
# ---- dataset artifact: boundary between concatenated source samples (was "| 4 |") ----
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A : int =logging.get_logger(__name__)
_A : Union[str, Any] ={
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
    """Configuration for the InstructBLIP vision encoder (ViT tower).

    Defaults mirror the Salesforce/instruct-blip-flan-t5 checkpoint.  The base
    class `A` is presumably `PretrainedConfig` — not defined in this file;
    confirm upstream.

    Fixes over the original block: the `__init__` parameters all shared one
    name (a SyntaxError), every attribute assignment was collapsed onto a
    throwaway local, and `from_pretrained`'s unpack discarded the config dict
    it then read.
    """

    # Restored as `model_type` (was `A_`): `from_pretrained` below reads
    # `cls.model_type`.
    model_type = """instructblip_vision_model"""

    def __init__( self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=1E-6 , attention_dropout=0.0 , initializer_range=1E-10 , qkv_bias=True , **kwargs ) -> None:
        """Store the vision-tower hyper-parameters; extra kwargs go to the base config."""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def __UpperCAmelCase ( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        """Load this vision config from a checkpoint; when the checkpoint holds a
        full InstructBLIP config, extract its `vision_config` sub-dict and warn
        on a model-type mismatch."""
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type' ) == "instructblip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip_qformer"""
def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any]=3_0522 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : List[str]=3072 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Any=1408 , **UpperCamelCase_ : Dict , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : Any = num_hidden_layers
_lowercase : List[Any] = num_attention_heads
_lowercase : Optional[int] = hidden_act
_lowercase : Union[str, Any] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : Optional[int] = initializer_range
_lowercase : Tuple = layer_norm_eps
_lowercase : List[str] = position_embedding_type
_lowercase : str = cross_attention_frequency
_lowercase : int = encoder_hidden_size
@classmethod
def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase_ )
_lowercase , _lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase : Optional[int] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip"""
A_ = True
def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if vision_config is None:
_lowercase : Any = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
_lowercase : List[Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
_lowercase : List[Any] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : int = self.text_config.is_encoder_decoder
_lowercase : Tuple = num_query_tokens
_lowercase : str = self.vision_config.hidden_size
_lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : List[Any] = 1.0
_lowercase : int = 0.02
@classmethod
def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = copy.deepcopy(self.__dict__ )
_lowercase : Optional[int] = self.vision_config.to_dict()
_lowercase : Optional[Any] = self.qformer_config.to_dict()
_lowercase : Tuple = self.text_config.to_dict()
_lowercase : Dict = self.__class__.model_type
return output
| 4 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = 0
A_ = False
A_ = 3.0
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=UpperCamelCase_ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def __UpperCAmelCase ( self : int ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_lowercase : int = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_lowercase : List[str] = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , UpperCamelCase_ )
@require_multi_gpu
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : List[Any] = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
if __name__ == "__main__":
_A : List[Any] =DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
_A : str =Accelerator(kwargs_handlers=[ddp_scaler])
_A : str =torch.nn.Linear(1_0_0, 2_0_0)
_A : List[str] =accelerator.prepare(model)
# Check the values changed in kwargs
_A : List[str] =''''''
_A : Dict =model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 4 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A : List[str] ='''pt'''
elif is_tf_available():
_A : Tuple ='''tf'''
else:
_A : Optional[int] ='''jax'''
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = ByTaTokenizer
A_ = False
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
_lowercase : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
_lowercase : Dict = []
for i in range(len(UpperCamelCase_ ) ):
try:
_lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
_lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
if max_length is not None and len(UpperCamelCase_ ) > max_length:
_lowercase : List[Any] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
while len(UpperCamelCase_ ) < min_length:
_lowercase : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Dict = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
if " " not in output_txt and len(UpperCamelCase_ ) > 1:
_lowercase : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
)
if with_prefix_space:
_lowercase : Union[str, Any] = ' ' + output_txt
_lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : List[str] = self.ta_base_tokenizer
_lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.ta_base_tokenizer
_lowercase : Tuple = 'Unicode €.'
_lowercase : List[Any] = tokenizer(UpperCamelCase_ )
_lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
_lowercase : Any = tokenizer('e è é ê ë' )
_lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = self.ta_base_tokenizer
_lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
if FRAMEWORK != "jax":
_lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = self.ta_base_tokenizer
_lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase_ )
self.assertIn('attention_mask' , UpperCamelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.ta_base_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : str = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
_lowercase : str = self.ta_base_tokenizer
_lowercase : str = ['A long paragraph for summarization. </s>']
_lowercase : Optional[int] = ['Summary of the text. </s>']
# fmt: off
_lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowercase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[Any] = tempfile.mkdtemp()
_lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
_lowercase : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowercase : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : int = json.load(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(UpperCamelCase_ )
_lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
_lowercase : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : int = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __UpperCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
_lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Optional[int] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowercase : Optional[int] = 0
_lowercase : int = tokenizer.convert_ids_to_tokens(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
for attr in attributes_list:
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 4 | 1 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_A : Optional[Any] =logging.get_logger(__name__)
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Optional[Any]:
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase = None ) -> str:
_lowercase : Any = tesseract_config if tesseract_config is not None else ''
# apply OCR
_lowercase : Any = to_pil_image(_lowercase )
_lowercase , _lowercase : Optional[int] = pil_image.size
_lowercase : List[str] = pytesseract.image_to_data(_lowercase, lang=_lowercase, output_type='dict', config=_lowercase )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
_lowercase : Dict = [idx for idx, word in enumerate(_lowercase ) if not word.strip()]
_lowercase : Optional[int] = [word for idx, word in enumerate(_lowercase ) if idx not in irrelevant_indices]
_lowercase : Dict = [coord for idx, coord in enumerate(_lowercase ) if idx not in irrelevant_indices]
_lowercase : int = [coord for idx, coord in enumerate(_lowercase ) if idx not in irrelevant_indices]
_lowercase : Any = [coord for idx, coord in enumerate(_lowercase ) if idx not in irrelevant_indices]
_lowercase : List[str] = [coord for idx, coord in enumerate(_lowercase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_lowercase : List[Any] = []
for x, y, w, h in zip(_lowercase, _lowercase, _lowercase, _lowercase ):
_lowercase : Dict = [x, y, x + w, y + h]
actual_boxes.append(_lowercase )
# finally, normalize the bounding boxes
_lowercase : Union[str, Any] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_lowercase, _lowercase, _lowercase ) )
assert len(_lowercase ) == len(_lowercase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""pixel_values"""]
def __init__( self : Optional[int] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[str] = "" , **UpperCamelCase_ : Union[str, Any] , ) -> None:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
_lowercase : Tuple = size if size is not None else {'height': 224, 'width': 224}
_lowercase : str = get_size_dict(UpperCamelCase_ )
_lowercase : Union[str, Any] = do_resize
_lowercase : int = size
_lowercase : List[Any] = resample
_lowercase : List[str] = apply_ocr
_lowercase : Dict = ocr_lang
_lowercase : Any = tesseract_config
def __UpperCAmelCase ( self : int , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Dict , ) -> np.ndarray:
'''simple docstring'''
_lowercase : List[Any] = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
_lowercase : Tuple = (size['height'], size['width'])
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_ : Any , ) -> PIL.Image.Image:
'''simple docstring'''
_lowercase : List[Any] = do_resize if do_resize is not None else self.do_resize
_lowercase : List[str] = size if size is not None else self.size
_lowercase : Dict = get_size_dict(UpperCamelCase_ )
_lowercase : Union[str, Any] = resample if resample is not None else self.resample
_lowercase : int = apply_ocr if apply_ocr is not None else self.apply_ocr
_lowercase : List[str] = ocr_lang if ocr_lang is not None else self.ocr_lang
_lowercase : int = tesseract_config if tesseract_config is not None else self.tesseract_config
_lowercase : Union[str, Any] = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
# All transformations expect numpy arrays.
_lowercase : str = [to_numpy_array(UpperCamelCase_ ) for image in images]
if apply_ocr:
requires_backends(self , 'pytesseract' )
_lowercase : Tuple = []
_lowercase : Optional[Any] = []
for image in images:
_lowercase , _lowercase : List[str] = apply_tesseract(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
words_batch.append(UpperCamelCase_ )
boxes_batch.append(UpperCamelCase_ )
if do_resize:
_lowercase : Any = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_lowercase : Tuple = [flip_channel_order(UpperCamelCase_ ) for image in images]
_lowercase : int = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
_lowercase : int = BatchFeature(data={'pixel_values': images} , tensor_type=UpperCamelCase_ )
if apply_ocr:
_lowercase : Dict = words_batch
_lowercase : int = boxes_batch
return data
| 4 |
'''simple docstring'''
_A : Dict ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_A : Dict ={
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 4 | 1 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
# Validation structure used by every ReadMe test below: one first-level
# "Dataset Card for X" heading with the listed required subsections.
# Fix: restore the name `example_yaml_structure`; it was bound to `_A` and then
# clobbered by later assignments, so the tests could never reference it.
example_yaml_structure = yaml.safe_load(
    """\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
  - name: "Dataset Card for X"   # First-level markdown heading
    allow_empty: false
    allow_empty_text: true
    subsections:
      - name: "Table of Contents"
        allow_empty: false
        allow_empty_text: false
        subsections: null
      - name: "Dataset Description"
        allow_empty: false
        allow_empty_text: false
        subsections:
          - name: "Dataset Summary"
            allow_empty: false
            allow_empty_text: false
            subsections: null
          - name: "Supported Tasks and Leaderboards"
            allow_empty: true
            allow_empty_text: true
            subsections: null
          - name: Languages
            allow_empty: false
            allow_empty_text: true
            subsections: null
"""
)
# Expected `ReadMe.to_dict()` output for README_CORRECT.
# Fix: restore the name `CORRECT_DICT` referenced by the parametrize decorators.
CORRECT_DICT = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}
# A fully valid README, and a variant with an extra fourth-level heading that
# the parser must ignore.  Fix: restore the names referenced by the tests.
README_CORRECT = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

README_CORRECT_FOUR_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
# Expected `ReadMe.to_dict()` output for README_CORRECT_FOUR_LEVEL: identical to
# CORRECT_DICT except the ignored fourth-level heading shows up as an empty
# subsection under "Dataset Summary".
CORRECT_DICT_FOUR_LEVEL = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [
                                {
                                    "name": "Extra Ignored Subsection",
                                    "text": "",
                                    "is_empty_text": True,
                                    "subsections": [],
                                }
                            ],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}
# Invalid-YAML-marker fixtures and the validation errors they must produce.
# Fix: restore the constant names referenced by the parametrize decorators.
README_EMPTY_YAML = """\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)

README_NO_YAML = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)

README_INCORRECT_YAML = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
# Structurally invalid READMEs (missing text / subsections / content) and the
# exact validation errors expected for each.
README_MISSING_TEXT = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."

README_NONE_SUBSECTION = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""

EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."

README_MISSING_SUBSECTION = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""

EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."

README_MISSING_CONTENT = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""

EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
# First-level-heading error fixtures: no heading, several headings, a heading
# with the wrong prefix, and an entirely empty README.
README_MISSING_FIRST_LEVEL = """\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."

README_MULTIPLE_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""

EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."

README_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."

README_EMPTY = ""

EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
# A README whose first-level heading appears twice; parsing (not validation)
# must fail with the duplicate-section error below.
README_MULTIPLE_SAME_HEADING_1 = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""

EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    """Parsing a valid README string yields the expected section tree.

    Fix: the original `def f(_lowercase, _lowercase)` is a SyntaxError
    (duplicate argument); parametrize injects by name, so the parameters
    must be named exactly `readme_md` and `expected_dict`.
    """
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    """validate() raises ValueError carrying the expected issue list."""
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    """Duplicate headings are rejected already at parse time."""
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    """With suppress_parsing_errors=True a broken README must not raise."""
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    """Round-trip through a file: from_readme names the root after the path.

    Fix: the original body referenced the undefined locals `out` and `path`
    (everything had been renamed `_lowercase`), and its duplicate parameter
    names were a SyntaxError.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    """validate() on a file-backed README reports the file path in the error."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    """Parse-time errors from a file include the file path in the message."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    """suppress_parsing_errors=True must also work through from_readme."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 4 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Entropy of the softmax distribution implied by pre-softmax logits ``x``.

    Reduces over dim=1, returning one value per row:
    log(sum_i exp(x_i)) - sum_i x_i*exp(x_i) / sum_i exp(x_i),
    which equals -sum_i p_i*log(p_i) for p = softmax(x).

    Fix: this function was renamed ``__UpperCamelCase`` although the encoder
    and classifier below call it as ``entropy`` — restoring the name fixes
    the NameError.
    """
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    """BERT encoder with one highway (early-exit) classifier after each layer.

    Fix: the class was renamed ``lowerCamelCase__`` although ``DeeBertModel``
    instantiates ``DeeBertEncoder``, and all three methods shared the name
    ``__UpperCAmelCase`` so only the last definition survived; the forward
    signature also repeated one parameter name (a SyntaxError).
    """

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        # Per-layer entropy threshold; -1 disables early exit for that layer.
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        """Set the exit threshold for every layer (scalar) or per layer (list)."""
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        """Copy the main model's pooler weights into every highway pooler."""
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                # Early exit: stop the forward pass via an exception so the
                # caller can recover the partial outputs and the exit layer.
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    """BERT backbone whose encoder is a :class:`DeeBertEncoder`.

    Fix: restored the class name referenced by the classification head below,
    the real method names (all were ``__UpperCAmelCase`` and shadowed each
    other), and distinct forward parameters (duplicates were a SyntaxError).
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        """Initialize every highway pooler from the main pooler's weights."""
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prune attention heads; ``heads_to_prune`` maps layer -> head list."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    """Raised by :class:`DeeBertEncoder` to abort the forward pass early.

    ``message`` carries the partial outputs tuple; ``exit_layer`` is the
    1-based index of the layer that triggered the exit.  Fix: restored the
    class name used at the raise/except sites.
    """

    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """Shortcut head: pools one intermediate layer's output and classifies it.

    Fix: restored the class name (instantiated by ``DeeBertEncoder``) and
    distinct method names.
    """

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    """Sequence classifier over :class:`DeeBertModel` with highway training/exits.

    Fix: restored class/method names and distinct forward parameters (the
    obfuscated signature repeated one name, a SyntaxError), so the keyword
    call into ``self.bert`` and the ``entropy``/``HighwayException`` references
    resolve again.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An intermediate layer exited early; recover its partial outputs.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 4 | 1 |
'''simple docstring'''
import os
def solution(filename: str = "input.txt") -> int:
    """Project Euler 82: minimal path sum across a matrix.

    The path starts in any cell of the left column, ends in any cell of the
    right column, and may move up, down, or right.  The matrix is read from
    ``filename`` (comma-separated rows) next to this source file.

    Fix: the function was renamed ``__UpperCamelCase`` while the ``__main__``
    guard calls ``solution()`` — restoring the name fixes the NameError.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    # Left column: a path can start anywhere, so cost is the cell itself.
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    # Column by column: first assume a straight move from the left, then relax
    # with a downward sweep and an upward sweep to account for vertical moves.
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    # The path may end in any cell of the right column.
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
| 4 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase__(unittest.TestCase):
    """Unit tests for ``knapsack.greedy_knapsack.calc_profit``.

    Fixes: all methods were named ``__UpperCAmelCase`` (each definition
    shadowed the previous one and none was discovered by unittest), and
    ``assertRaisesRegex`` was called with the undefined name ``UpperCamelCase_``
    and never actually invoked the function under test.
    """

    def test_sorted(self):
        """calc_profit returns the maximal achievable profit."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """A negative capacity is rejected."""
        with self.assertRaisesRegex(ValueError, "max_weight must greater than zero."):
            kp.calc_profit([10, 20, 30], [2, 4, 6], -15)

    def test_negative_weight_value(self):
        """A negative weight entry is rejected."""
        with self.assertRaisesRegex(ValueError, "Weight can not be negative."):
            kp.calc_profit([10, 20, 30], [2, -4, 6], 15)

    def test_negative_profit_value(self):
        """A negative profit entry is rejected."""
        with self.assertRaisesRegex(ValueError, "Profit can not be negative."):
            kp.calc_profit([-10, 20, 30], [2, 4, 6], 15)

    def test_null_max_weight(self):
        """A zero capacity is rejected."""
        with self.assertRaisesRegex(ValueError, "max_weight must greater than zero."):
            kp.calc_profit([10, 20, 30], [2, 4, 6], 0)

    def test_unequal_list_length(self):
        """Profit and weight lists must have the same length."""
        with self.assertRaisesRegex(IndexError, "The length of profit and weight must be same."):
            kp.calc_profit([10, 20, 30, 40], [2, 4, 6], 100)


if __name__ == "__main__":
    unittest.main()
| 4 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]=13 , UpperCamelCase_ : Optional[Any]=7 , UpperCamelCase_ : int=True , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Optional[int]=99 , UpperCamelCase_ : int=32 , UpperCamelCase_ : int=5 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : Dict=37 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : Optional[Any]=512 , UpperCamelCase_ : Dict=16 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Optional[int]=0.02 , UpperCamelCase_ : Any=3 , UpperCamelCase_ : Union[str, Any]=4 , UpperCamelCase_ : List[Any]=None , ) -> int:
'''simple docstring'''
_lowercase : Any = parent
_lowercase : Dict = batch_size
_lowercase : Any = seq_length
_lowercase : Optional[int] = is_training
_lowercase : Optional[Any] = use_input_mask
_lowercase : Union[str, Any] = use_token_type_ids
_lowercase : Optional[Any] = use_labels
_lowercase : List[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Optional[Any] = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : Optional[Any] = intermediate_size
_lowercase : Union[str, Any] = hidden_act
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : List[Any] = attention_probs_dropout_prob
_lowercase : Dict = max_position_embeddings
_lowercase : List[str] = type_vocab_size
_lowercase : Optional[int] = type_sequence_label_size
_lowercase : str = initializer_range
_lowercase : Union[str, Any] = num_labels
_lowercase : Union[str, Any] = num_choices
_lowercase : Dict = scope
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : List[Any] = None
if self.use_input_mask:
_lowercase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : int = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Optional[Any] = None
_lowercase : Optional[int] = None
_lowercase : Optional[Any] = None
if self.use_labels:
_lowercase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : Any = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase_ , )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_lowercase : List[str] = OpenLlamaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
_lowercase : List[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Tuple , ) -> Optional[int]:
'''simple docstring'''
_lowercase : Optional[int] = True
_lowercase : int = OpenLlamaModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : Union[str, Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
_lowercase : Union[str, Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , )
_lowercase : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] , ) -> Any:
'''simple docstring'''
_lowercase : Tuple = OpenLlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : int = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
_lowercase : List[Any] = True
_lowercase : Optional[Any] = True
_lowercase : Optional[Any] = OpenLlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
# first forward pass
_lowercase : Any = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ , )
_lowercase : Any = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_lowercase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowercase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_lowercase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowercase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
_lowercase : int = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['hidden_states'][0]
_lowercase : List[Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['hidden_states'][0]
# select random slice
_lowercase : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowercase : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowercase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : List[Any] = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : Optional[Any] = config_and_inputs
_lowercase : str = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Test-suite for the Open-Llama model family (reconstructed to the
    standard transformers layout).

    NOTE(review): the previous version declared duplicate bases
    ``(A , A , A, unittest.TestCase)`` — a ``TypeError`` at class creation —
    and named every test ``__UpperCAmelCase`` so unittest could discover none
    of them. The mixin names below assume this file carries the usual testing
    imports (``ModelTesterMixin`` etc.) — confirm against the import block at
    the top of the file.
    """

    # Class-level knobs read by the tester mixins.
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        """Create the shared model/config testers."""
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        # Element 0 of the prepared tuple is the config.
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling(self, scaling_type):
        """RoPE scaling must change long-input outputs; dynamic scaling must
        leave short inputs (within the original max length) unchanged."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 4 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy import structure: submodule name -> list of public symbols.
# NOTE(review): the previous version rebound _A for every optional group
# (losing all earlier entries) and then passed the undefined name
# `_import_structure` to _LazyModule without replacing the module in
# sys.modules; entries are now accumulated into the single dict `_A`.
_A = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A['''tokenization_xlnet'''] = ['''XLNetTokenizer''']
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A['''tokenization_xlnet_fast'''] = ['''XLNetTokenizerFast''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A['''modeling_xlnet'''] = [
        '''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLNetForMultipleChoice''',
        '''XLNetForQuestionAnswering''',
        '''XLNetForQuestionAnsweringSimple''',
        '''XLNetForSequenceClassification''',
        '''XLNetForTokenClassification''',
        '''XLNetLMHeadModel''',
        '''XLNetModel''',
        '''XLNetPreTrainedModel''',
        '''load_tf_weights_in_xlnet''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A['''modeling_tf_xlnet'''] = [
        '''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXLNetForMultipleChoice''',
        '''TFXLNetForQuestionAnsweringSimple''',
        '''TFXLNetForSequenceClassification''',
        '''TFXLNetForTokenClassification''',
        '''TFXLNetLMHeadModel''',
        '''TFXLNetMainLayer''',
        '''TFXLNetModel''',
        '''TFXLNetPreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _A, module_spec=__spec__)
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy import structure: submodule name -> list of public symbols.
# NOTE(review): the previous version rebound _A for every optional group
# (losing all earlier entries) and passed the undefined name
# `_import_structure` to _LazyModule without installing it in sys.modules;
# entries are now accumulated into the single dict `_A`.
_A = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A['''modeling_xglm'''] = [
        '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XGLMForCausalLM''',
        '''XGLMModel''',
        '''XGLMPreTrainedModel''',
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A['''modeling_flax_xglm'''] = [
        '''FlaxXGLMForCausalLM''',
        '''FlaxXGLMModel''',
        '''FlaxXGLMPreTrainedModel''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A['''modeling_tf_xglm'''] = [
        '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXGLMForCausalLM''',
        '''TFXGLMModel''',
        '''TFXGLMPreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _A)
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and pretrained-config archive map.
# NOTE(review): both statements bind the same name `_A`, so the dict below
# clobbers the logger — in the upstream file these are `logger` and
# `MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP`; confirm before relying on either.
_A : Optional[Any] =logging.get_logger(__name__)
_A : Optional[int] ={
    '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
    '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class lowerCamelCase__ ( PretrainedConfig ):
    """Configuration for a MarkupLM model.

    Holds the transformer hyper-parameters plus the MarkupLM-specific XPath
    embedding sizes (tag/subscript vocabularies, padding ids, depth).

    NOTE(review): reconstructed from an obfuscated version in which every
    parameter was named ``UpperCamelCase_`` (duplicate-argument SyntaxError),
    the base class ``A`` was undefined (``PretrainedConfig`` is imported
    above), and attribute assignments were lost to ``_lowercase``. The
    parameter order is pinned by the original default values, which were
    preserved by the obfuscation.
    """

    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy import structure: submodule name -> list of public symbols.
# NOTE(review): the previous version rebound _A for every optional group
# (losing all earlier entries) and then passed the undefined name
# `_import_structure` to _LazyModule without replacing the module in
# sys.modules; entries are now accumulated into the single dict `_A`.
_A = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A['''tokenization_xlnet'''] = ['''XLNetTokenizer''']
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A['''tokenization_xlnet_fast'''] = ['''XLNetTokenizerFast''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A['''modeling_xlnet'''] = [
        '''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLNetForMultipleChoice''',
        '''XLNetForQuestionAnswering''',
        '''XLNetForQuestionAnsweringSimple''',
        '''XLNetForSequenceClassification''',
        '''XLNetForTokenClassification''',
        '''XLNetLMHeadModel''',
        '''XLNetModel''',
        '''XLNetPreTrainedModel''',
        '''load_tf_weights_in_xlnet''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A['''modeling_tf_xlnet'''] = [
        '''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXLNetForMultipleChoice''',
        '''TFXLNetForQuestionAnsweringSimple''',
        '''TFXLNetForSequenceClassification''',
        '''TFXLNetForTokenClassification''',
        '''TFXLNetLMHeadModel''',
        '''TFXLNetMainLayer''',
        '''TFXLNetModel''',
        '''TFXLNetPreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _A, module_spec=__spec__)
| 4 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __UpperCamelCase ( checkpoint_url ) -> Tuple:
    """Build a ``SwinaSRConfig`` matching the given original checkpoint URL.

    NOTE(review): reconstructed — the obfuscated version named the parameter
    ``_lowercase`` while reading ``checkpoint_url``, and collapsed every
    config-attribute assignment into ``_lowercase``. Attribute names below
    follow the upstream Swin2SR conversion script (upscale/image_size/
    upsampler/depths/embed_dim/num_heads/num_channels/window_size/img_range)
    — confirm against ``Swin2SRConfig``.
    """
    config = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 2_5_5.0
        config.upsampler = ''
    return config
def __UpperCamelCase ( name, config ) -> str:
    """Map one key of the original Swin2SR checkpoint to its HF name.

    NOTE(review): restored — the obfuscated version had two parameters both
    named ``_lowercase`` (SyntaxError) while the body read ``name`` and
    ``config``, and every rebinding of ``name`` was lost to ``_lowercase``.
    """
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm')
    if "layers" in name:
        name = name.replace('layers', 'encoder.stages')
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks', 'layers')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "q_bias" in name:
        name = name.replace('q_bias', 'query.bias')
    if "k_bias" in name:
        name = name.replace('k_bias', 'key.bias')
    if "v_bias" in name:
        name = name.replace('v_bias', 'value.bias')
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp', 'continuous_position_bias_mlp')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'patch_embed.projection')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "conv_first" in name:
        name = name.replace('conv_first', 'first_convolution')
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last', 'final_convolution')
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0', 'conv_before_upsample')
            if "upsample.0" in name:
                name = name.replace('upsample.0', 'upsample.convolution_0')
            if "upsample.2" in name:
                name = name.replace('upsample.2', 'upsample.convolution_1')
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight', 'upsample.conv.weight')
            name = name.replace('upsample.0.bias', 'upsample.conv.bias')
        else:
            pass
    else:
        name = 'swin2sr.' + name
    return name


rename_key = __UpperCamelCase  # importable alias (dunder name is skipped by `import *`)
# Splits fused qkv tensors of the original Swin2SR checkpoint into separate
# query/key/value entries and renames the remaining keys.
# NOTE(review): this function appears machine-mangled — both parameters are
# named `_lowercase` (duplicate argument -> SyntaxError) while the body reads
# `orig_state_dict`/`config`, and the destination state-dict keys of the
# q/k/v slices were lost (each assignment targets the throwaway `_lowercase`).
# Restore from the upstream Swin2SR conversion script before use.
def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]:
    for key in orig_state_dict.copy().keys():
        # `val` in the original; popped so renamed keys can be re-inserted.
        _lowercase : int = orig_state_dict.pop(_lowercase )
        if "qkv" in key:
            # key layout: layers.<stage>.residual_group.blocks.<block>.attn.qkv.*
            _lowercase : Tuple = key.split('.' )
            _lowercase : Optional[Any] = int(key_split[1] )
            _lowercase : Any = int(key_split[4] )
            _lowercase : Optional[Any] = config.embed_dim
            if "weight" in key:
                # q / k / v weight slices of the fused (3*dim, dim) matrix.
                _lowercase : Optional[int] = val[:dim, :]
                _lowercase : int = val[dim : dim * 2, :]
                _lowercase : int = val[-dim:, :]
            else:
                # q / k / v bias slices of the fused (3*dim,) vector.
                _lowercase : Optional[Any] = val[:dim]
                _lowercase : Tuple = val[dim : dim * 2]
                _lowercase : List[str] = val[-dim:]
            pass
        else:
            # Non-qkv tensors are re-inserted (upstream: under rename_key(key)).
            _lowercase : List[Any] = val
    return orig_state_dict
# End-to-end conversion: download the original Swin2SR checkpoint, remap its
# state dict, sanity-check outputs on a reference image, then save/push.
# NOTE(review): machine-mangled — the three parameters are all named
# `_lowercase` (duplicate argument -> SyntaxError) while the body reads
# `checkpoint_url`/`pytorch_dump_folder_path`/`push_to_hub`, and most local
# bindings (`model`, `config`, `outputs`, `expected_shape`, `url_to_name`,
# `model_name`, `missing_keys`/`unexpected_keys`, ...) were lost to
# `_lowercase`. `get_config` is also undefined here (the helper above is
# itself named `__UpperCamelCase`). Restore names before running.
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Union[str, Any]:
    _lowercase : Optional[Any] = get_config(_lowercase )
    _lowercase : Union[str, Any] = SwinaSRForImageSuperResolution(_lowercase )
    model.eval()
    _lowercase : List[Any] = torch.hub.load_state_dict_from_url(_lowercase, map_location='cpu' )
    _lowercase : Any = convert_state_dict(_lowercase, _lowercase )
    # load_state_dict(strict=False) returns (missing_keys, unexpected_keys).
    _lowercase , _lowercase : str = model.load_state_dict(_lowercase, strict=_lowercase )
    if len(_lowercase ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(_lowercase ) )
    # Buffers recomputed at init time are allowed to be "unexpected".
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f'''Unexpected key {key} in state_dict''' )
    # verify values
    _lowercase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    _lowercase : Any = Image.open(requests.get(_lowercase, stream=_lowercase ).raw ).convert('RGB' )
    _lowercase : Tuple = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    # JPEG-artifact checkpoints use a 126px input, all others 256px.
    _lowercase : Tuple = 126 if 'Jpeg' in checkpoint_url else 256
    _lowercase : List[str] = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    _lowercase : Optional[Any] = transforms(_lowercase ).unsqueeze(0 )
    # Grayscale checkpoints take a single channel.
    if config.num_channels == 1:
        _lowercase : Any = pixel_values[:, 0, :, :].unsqueeze(1 )
    _lowercase : Optional[int] = model(_lowercase )
    # assert values
    # Per-checkpoint expected output shape and top-left 3x3 slice.
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        _lowercase : Any = torch.Size([1, 3, 512, 512] )
        _lowercase : Tuple = torch.tensor(
            [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        _lowercase : Optional[Any] = torch.Size([1, 3, 1024, 1024] )
        _lowercase : int = torch.tensor(
            [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        _lowercase : Optional[int] = torch.Size([1, 3, 1024, 1024] )
        _lowercase : Dict = torch.tensor(
            [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        _lowercase : List[str] = torch.Size([1, 3, 512, 512] )
        _lowercase : int = torch.tensor(
            [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        _lowercase : Any = torch.Size([1, 3, 1024, 1024] )
        _lowercase : Union[str, Any] = torch.tensor(
            [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowercase, atol=1E-3 )
    print('Looks ok!' )
    # Map from original checkpoint URL to the hub model name.
    _lowercase : List[str] = {
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
            'swin2SR-classical-sr-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
            'swin2SR-classical-sr-x4-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
            'swin2SR-compressed-sr-x4-48'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
            'swin2SR-lightweight-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
            'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
        ),
    }
    _lowercase : int = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(_lowercase )
        print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(_lowercase )
    if push_to_hub:
        model.push_to_hub(f'''caidas/{model_name}''' )
        processor.push_to_hub(f'''caidas/{model_name}''' )
# CLI entry point for the Swin2SR checkpoint conversion.
# NOTE(review): machine-mangled — the parser and parsed args are bound to
# `_A`, yet the code reads `parser` and `args`, and it calls
# `convert_swinasr_checkpoint` although the converter above is named
# `__UpperCamelCase`; restore the original bindings before running.
if __name__ == "__main__":
    _A : Dict =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
        type=str,
        help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
    _A : int =parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 1 |
'''simple docstring'''
import math


def __UpperCamelCase ( apparent_power, power_factor ) -> float:
    """Return the real power P = S * pf for apparent power S and power factor pf.

    Raises:
        ValueError: if ``power_factor`` is not a number in [-1, 1].

    NOTE(review): restored — the obfuscated version named both parameters
    ``_lowercase`` (duplicate argument) while reading ``apparent_power`` and
    ``power_factor``. Both functions in this snippet still share the name
    ``__UpperCamelCase`` (the second shadows the first); the aliases below
    make each independently reachable.
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * power_factor


real_power = __UpperCamelCase  # importable alias (dunder name skipped by `import *`)


def __UpperCamelCase ( apparent_power, power_factor ) -> float:
    """Return the reactive power Q = S * sqrt(1 - pf**2).

    Raises:
        ValueError: if ``power_factor`` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * math.sqrt(1 - power_factor**2)


reactive_power = __UpperCamelCase  # importable alias


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 4 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase, _lowercase ) -> list:
_lowercase : List[str] = word.split()
def justify(_lowercase, _lowercase, _lowercase ) -> str:
_lowercase : Dict = max_width - width
_lowercase : Tuple = len(_lowercase )
if len(_lowercase ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
_lowercase : Tuple = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
_lowercase : str = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
_lowercase : Optional[int] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(_lowercase ):
num_spaces_between_words_list[i] += 1
_lowercase : Union[str, Any] = []
for i in range(_lowercase ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(_lowercase )
_lowercase : str = []
_lowercase : list[str] = []
_lowercase : Union[str, Any] = 0
for word in words:
if width + len(_lowercase ) + len(_lowercase ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(_lowercase )
width += len(_lowercase )
else:
# justify the line and add it to result
answer.append(justify(_lowercase, _lowercase, _lowercase ) )
# reset new line and new width
_lowercase , _lowercase : Optional[Any] = [word], len(_lowercase )
_lowercase : Optional[int] = max_width - width - len(_lowercase )
answer.append(' '.join(_lowercase ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 4 | 1 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
# Substring rewrite rules (tf fragment -> hf fragment), applied in order.
# Fix: all five constants were mangled to `_A` while later code uses the
# names INIT_COMMON / END_COMMON / DECODER_PATTERNS / REMAINING_PATTERNS /
# KEYS_TO_IGNORE; the original bindings are restored.
INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

# TF checkpoint keys that have no PyTorch counterpart and are skipped.
KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]
def rename_state_dict_key(k: str, patterns) -> str:
    """Apply each ``(tf_name, hf_name)`` substring replacement in *patterns* to key *k*.

    Fix: the mangled version assigned the replacement result to a throwaway
    name and returned the unmodified key; the accumulator is restored so the
    function actually rewrites the key (callers invoke it as
    ``rename_state_dict_key``).
    """
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    """Map a dict of TF checkpoint arrays onto a freshly initialised HF model.

    Args:
        tf_weights: mapping of TF variable name -> numpy array.
        config_update: keyword overrides forwarded to ``BigBirdPegasusConfig``.

    Raises:
        ValueError: when a converted key has no counterpart in the torch state dict.
    """
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        # dense / projection kernels are stored transposed in the TF checkpoint
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    # the shared position-embedding table feeds both the encoder and the decoder
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    """Read every variable of a TF checkpoint into a ``{name: numpy array}`` dict.

    Variables whose name contains any entry of ``ignore_name`` (the optimizer
    bookkeeping ``global_step``) are skipped.
    """
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    """Load a TF BigBirdPegasus checkpoint, convert it and save as a PyTorch model."""
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # Optional config overrides forwarded to BigBirdPegasusConfig(**config_update).
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 4 |
'''simple docstring'''
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield the paths of all ``.py`` / ``.ipynb`` files under *top_dir*.

    Skips ``scripts`` directories and hidden/underscore-prefixed directories,
    and ignores ``__init__.py`` files. Fix: the directory filter must be
    assigned back through ``dir_names[:]`` so ``os.walk`` is pruned in place
    (the mangled version bound the filtered list to a throwaway name).
    """
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")
def md_prefix(i: int) -> str:
    """Markdown prefix for nesting level *i*: a fresh ``##`` header at level 0,
    an indented ``*`` bullet otherwise (callers invoke this as ``md_prefix``)."""
    return f"{i * ' '}*" if i else "\n##"
def print_path(old_path: str, new_path: str) -> str:
    """Print a header/bullet for each path component of *new_path* that differs
    from the corresponding component of *old_path*; return *new_path* so the
    caller can track the last printed path."""
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path
def print_directory_md(top_dir: str = ".") -> None:
    """Print a DIRECTORY.md-style markdown index of all good files under *top_dir*.

    Fix: restores the ``{filename}`` interpolations that were replaced by the
    literal "(unknown)" and the mangled local bindings.
    """
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
    # Emit the markdown index for the current directory tree to stdout.
    print_directory_md('''.''')
| 4 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase__(metaclass=DummyObject):
    """Placeholder emitted when the "flax" and "transformers" backends are missing:
    constructing it or calling its classmethods raises via ``requires_backends``.

    NOTE(review): the original class name was lost to mangling (all four dummy
    classes in this chunk share one name, so only the last survives at import
    time) — recover the real names from the generating script.
    """

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class lowerCamelCase__(metaclass=DummyObject):
    """Dummy object raising an informative error because the "flax" and
    "transformers" backends are not installed (original name lost to mangling)."""

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class lowerCamelCase__(metaclass=DummyObject):
    """Dummy object raising an informative error because the "flax" and
    "transformers" backends are not installed (original name lost to mangling)."""

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class lowerCamelCase__(metaclass=DummyObject):
    """Dummy object raising an informative error because the "flax" and
    "transformers" backends are not installed (original name lost to mangling)."""

    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 4 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazily-imported module structure: maps submodule name -> public names it exports.
# Fix: every entry was assigned to the throwaway `_A`, leaving `_import_structure`
# undefined and never registering the lazy module in sys.modules.
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    """Fast CPU smoke tests for the ScoreSdeVe pipeline with a tiny UNet.

    Fix: restores mangled local names and gives this class a name distinct
    from the integration-test class below (both were named identically, so
    the first definition was shadowed and never collected).
    """

    @property
    def dummy_uncond_unet(self):
        # Tiny, seeded UNet2D so the test is deterministic and fast.
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    """Slow integration test running the full pretrained ncsnpp-church pipeline."""

    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 4 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds tiny RoFormer configs and random inputs for the model tests below.

    Fix: restores the class name (it is referenced as ``FlaxRoFormerModelTester``
    in ``setUp`` below), the duplicated parameter names (a SyntaxError) and the
    lost ``self.*`` attribute bindings.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) for a tiny model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repack the prepared inputs as the dict shape expected by FlaxModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model-tester suite applied to every RoFormer head."""

    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Smoke-test loading the PyTorch weights into each Flax head.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    """Checks pretrained roformer_chinese_base logits against reference values."""

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> tuple[complex, complex]:
if a == 0:
raise ValueError('Coefficient \'a\' must not be zero.' )
_lowercase : Tuple = b * b - 4 * a * c
_lowercase : List[Any] = (-b + sqrt(_lowercase )) / (2 * a)
_lowercase : Dict = (-b - sqrt(_lowercase )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def __UpperCamelCase ( ) -> List[Any]:
_lowercase , _lowercase : Optional[Any] = quadratic_roots(a=5, b=6, c=1 )
print(f'''The solutions are: {solutiona} and {solutiona}''' )
if __name__ == "__main__":
main()
| 4 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    """Turns raw mono audio into (optionally fused) log-mel spectrograms for CLAP.

    Fix: restores the class name, the duplicated ``__init__``/``__call__``
    parameter names (a SyntaxError) and the lost ``self.*`` / local bindings,
    inferred from the surviving use-sites.
    """

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # number of FFT bins for a real input of length fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # two filter banks: HTK-style (fused path) and Slaney-style (rand_trunc path)
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this instance, dropping the large recomputable mel filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        """Compute a (frames, n_mels) log-mel spectrogram in dB for *waveform*."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Stack a bilinear-shrunk copy of *mel* with three random chunks of it."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        """Truncate/pad *waveform* to *max_length* samples and return (mel, longer)."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length+hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")

        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms into a BatchFeature
        with "input_features" (mel spectrograms) and "is_longer" flags."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
| 4 | 1 |
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    """Logical OR for two 0/1 inputs: return 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)
def test_or_gate() -> None:
    """Exhaustively check the OR truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
    # Demo: print the OR gate output for all four input combinations.
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 4 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: int) -> dict:
    """Fetch a single Hacker News item as a dict via the public Firebase API."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Return the first *max_stories* items from the current top-stories list."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the current top stories as a Markdown bullet list of links."""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
    # Print the top Hacker News stories as a Markdown list.
    print(hackernews_top_stories_as_markdown())
| 4 | 1 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers (the code below
# references this constant by name, so the mangled `_A` binding is restored).
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    """Count the "triangle words" in the adjacent words.txt (Project Euler 42).

    A word's value is the sum of its letters' alphabet positions (A=1);
    the word is a triangle word when that value is a triangular number.
    """
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    # File format: one line of comma-separated, double-quoted uppercase words.
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
    # Print the number of triangle words found in words.txt.
    print(solution())
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict =logging.get_logger(__name__)  # module-level logger
# NOTE(review): the mapping below immediately rebinds the logger's name `_A`;
# the original file presumably used two distinct module-level names here.
_A : Dict ={
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__ ( A ):
    """Configuration class for a MEGATRON-BERT model (``model_type = "megatron-bert"``).

    Stores the architecture hyper-parameters (vocabulary size, hidden size,
    layer counts, dropout rates, ...) on the config instance.

    NOTE(review): the ``__init__`` signature repeats the name ``UpperCamelCase_``
    for every parameter (a SyntaxError) while the body reads names such as
    ``vocab_size`` that the signature never binds — the original parameter
    names appear to have been lost; restore them before running.
    """
    # Model-type identifier used by the auto-config machinery.
    A_ = """megatron-bert"""
    def __init__( self : int , UpperCamelCase_ : int=2_9056 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[Any]=24 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=4096 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Any=1E-12 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Any , ) -> List[Any]:
        """Store the architecture hyper-parameters on the config instance."""
        super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
        _lowercase : Dict = vocab_size
        _lowercase : Any = hidden_size
        _lowercase : Union[str, Any] = num_hidden_layers
        _lowercase : Dict = num_attention_heads
        _lowercase : Dict = hidden_act
        _lowercase : Optional[Any] = intermediate_size
        _lowercase : Optional[int] = hidden_dropout_prob
        _lowercase : Optional[Any] = attention_probs_dropout_prob
        _lowercase : Any = max_position_embeddings
        _lowercase : str = type_vocab_size
        _lowercase : Optional[Any] = initializer_range
        _lowercase : List[str] = layer_norm_eps
        _lowercase : List[Any] = position_embedding_type
        _lowercase : Optional[Any] = use_cache
| 4 | 1 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_A : Dict =logging.getLogger(__name__)  # module-level logger for this test script
def __UpperCamelCase ( _lowercase=2, _lowercase=3, _lowercase=16, _lowercase = 10, _lowercase = 2 ) -> Tuple:
def get_dataset(_lowercase ):
_lowercase : int = torch.randn(batch_size * n_batches, 1 )
return TensorDataset(_lowercase, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1 ) )
_lowercase : Dict = get_dataset(_lowercase )
_lowercase : Dict = get_dataset(_lowercase )
_lowercase : str = DataLoader(_lowercase, shuffle=_lowercase, batch_size=_lowercase, num_workers=4 )
_lowercase : Any = DataLoader(_lowercase, shuffle=_lowercase, batch_size=_lowercase, num_workers=4 )
return (train_dataloader, valid_dataloader)
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase, _lowercase=None ) -> Dict:
_lowercase : Any = []
for epoch in range(_lowercase ):
# Train quickly
model.train()
for batch in dataloader:
_lowercase , _lowercase : List[Any] = batch
_lowercase : Dict = model(_lowercase )
_lowercase : Any = torch.nn.functional.mse_loss(_lowercase, _lowercase )
accelerator.backward(_lowercase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCamelCase__ ( nn.Module ):
    """Toy affine model computing ``x * a + b`` with two scalar parameters."""

    def __init__( self ) -> None:
        """Initialise the scalar parameters from standard normal draws.

        BUG FIX: the parameters were previously bound to throwaway locals, so
        the forward pass would fail with AttributeError; they are now bound
        to ``self``.
        """
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )

    def __UpperCAmelCase ( self , UpperCamelCase_ ):
        """Return ``x * a + b`` for the input tensor.

        BUG FIX: the body previously read the undefined name ``x``; it now
        uses the method's parameter.
        NOTE(review): ``nn.Module.__call__`` dispatches to ``forward``, which
        this class does not define under that name — confirm the intended
        method name upstream.
        """
        return UpperCamelCase_ * self.a + self.b
class lowerCamelCase__ ( unittest.TestCase ):
    """Checkpoint save/load behaviour tests for ``accelerate.Accelerator``.

    NOTE(review): throughout this class, locals are assigned to ``_lowercase``
    but later read under names such as ``model`` / ``optimizer`` /
    ``accelerator`` — the original local names appear to have been lost;
    restore them before running.
    """
    def __UpperCAmelCase ( self : str ) -> int:
        """With ``total_limit=1``, a second save_state evicts the first checkpoint."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            _lowercase : Union[str, Any] = DummyModel()
            _lowercase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
            _lowercase , _lowercase : Union[str, Any] = dummy_dataloaders()
            _lowercase : List[Any] = ProjectConfiguration(total_limit=1 , project_dir=UpperCamelCase_ , automatic_checkpoint_naming=UpperCamelCase_ )
            # Train baseline
            _lowercase : List[Any] = Accelerator(project_config=UpperCamelCase_ )
            _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = accelerator.prepare(
                UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
        """Saving to an explicit path and loading it restores model/optimizer state."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            _lowercase : Any = DummyModel()
            _lowercase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
            _lowercase , _lowercase : Dict = dummy_dataloaders()
            # Train baseline
            _lowercase : List[Any] = Accelerator()
            _lowercase , _lowercase , _lowercase , _lowercase : Dict = accelerator.prepare(
                UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
            # Save initial
            _lowercase : Any = os.path.join(UpperCamelCase_ , 'initial' )
            accelerator.save_state(UpperCamelCase_ )
            ((_lowercase) , (_lowercase)) : Union[str, Any] = model.a.item(), model.b.item()
            _lowercase : Optional[Any] = optimizer.state_dict()
            _lowercase : int = train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
            ((_lowercase) , (_lowercase)) : str = model.a.item(), model.b.item()
            _lowercase : Union[str, Any] = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            _lowercase : int = DummyModel()
            _lowercase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
            _lowercase , _lowercase : Any = dummy_dataloaders()
            _lowercase : List[str] = Accelerator()
            _lowercase , _lowercase , _lowercase , _lowercase : List[str] = accelerator.prepare(
                UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
            accelerator.load_state(UpperCamelCase_ )
            ((_lowercase) , (_lowercase)) : List[Any] = model.a.item(), model.b.item()
            _lowercase : int = optimizer.state_dict()
            self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
            self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
            self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
            _lowercase : str = train(2 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
            # Save everything
            _lowercase : Tuple = os.path.join(UpperCamelCase_ , 'checkpoint' )
            accelerator.save_state(UpperCamelCase_ )
            # Load everything back in and make sure all states work
            accelerator.load_state(UpperCamelCase_ )
            test_rands += train(1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
            ((_lowercase) , (_lowercase)) : str = model.a.item(), model.b.item()
            _lowercase : int = optimizer.state_dict()
            self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
            self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
            self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
            self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
    def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        """Automatic checkpoint naming produces loadable checkpoint_0 / checkpoint_1."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            _lowercase : Optional[Any] = DummyModel()
            _lowercase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
            _lowercase , _lowercase : Dict = dummy_dataloaders()
            _lowercase : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ )
            # Train baseline
            _lowercase : Optional[Any] = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
            _lowercase , _lowercase , _lowercase , _lowercase : int = accelerator.prepare(
                UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
            # Save initial
            accelerator.save_state()
            ((_lowercase) , (_lowercase)) : int = model.a.item(), model.b.item()
            _lowercase : Dict = optimizer.state_dict()
            _lowercase : Optional[int] = train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
            ((_lowercase) , (_lowercase)) : Optional[int] = model.a.item(), model.b.item()
            _lowercase : List[str] = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            _lowercase : Tuple = DummyModel()
            _lowercase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
            _lowercase , _lowercase : List[Any] = dummy_dataloaders()
            _lowercase : Optional[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=UpperCamelCase_ )
            _lowercase : Optional[int] = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
            _lowercase , _lowercase , _lowercase , _lowercase : Dict = accelerator.prepare(
                UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
            accelerator.load_state(os.path.join(UpperCamelCase_ , 'checkpoints' , 'checkpoint_0' ) )
            ((_lowercase) , (_lowercase)) : str = model.a.item(), model.b.item()
            _lowercase : Tuple = optimizer.state_dict()
            self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
            self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
            self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
            _lowercase : List[str] = train(2 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(UpperCamelCase_ , 'checkpoints' , 'checkpoint_1' ) )
            test_rands += train(1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
            ((_lowercase) , (_lowercase)) : Any = model.a.item(), model.b.item()
            _lowercase : List[Any] = optimizer.state_dict()
            self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
            self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
            self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
            self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
    def __UpperCAmelCase ( self : str ) -> List[str]:
        """register_for_checkpointing rejects items without state_dict, naming offenders by index."""
        _lowercase : str = torch.tensor([1, 2, 3] )
        _lowercase : Union[str, Any] = torch.tensor([2, 3, 4] )
        _lowercase : Union[str, Any] = DummyModel()
        _lowercase : Union[str, Any] = torch.optim.Adam(net.parameters() )
        _lowercase : int = Accelerator()
        with self.assertRaises(UpperCamelCase_ ) as ve:
            accelerator.register_for_checkpointing(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        _lowercase : int = str(ve.exception )
        self.assertTrue('Item at index 0' in message )
        self.assertTrue('Item at index 1' in message )
        self.assertFalse('Item at index 2' in message )
        self.assertFalse('Item at index 3' in message )
    def __UpperCAmelCase ( self : Tuple ) -> Any:
        """LR-scheduler state is checkpointed and restored alongside the model."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            _lowercase : Dict = DummyModel()
            _lowercase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
            _lowercase : List[str] = torch.optim.lr_scheduler.StepLR(UpperCamelCase_ , step_size=1 , gamma=0.99 )
            _lowercase , _lowercase : str = dummy_dataloaders()
            _lowercase : Any = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ )
            # Train baseline
            _lowercase : List[str] = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
            _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[str] = accelerator.prepare(
                UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
            # Save initial
            accelerator.save_state()
            _lowercase : str = scheduler.state_dict()
            train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
            self.assertNotEqual(UpperCamelCase_ , scheduler.state_dict() )
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(UpperCamelCase_ , 'checkpoints' , 'checkpoint_0' ) )
            self.assertEqual(UpperCamelCase_ , scheduler.state_dict() )
    def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
        """With ``total_limit=2``, only the two most recent checkpoints survive."""
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            _lowercase : Optional[Any] = DummyModel()
            _lowercase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ , total_limit=2 )
            # Train baseline
            _lowercase : List[str] = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
            _lowercase : List[Any] = accelerator.prepare(UpperCamelCase_ )
            # Save 3 states:
            for _ in range(11 ):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(UpperCamelCase_ , 'checkpoints' , 'checkpoint_0' ) ) )
            self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , 'checkpoints' , 'checkpoint_9' ) ) )
            self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , 'checkpoints' , 'checkpoint_10' ) ) )
    @require_cuda
    def __UpperCAmelCase ( self : Optional[Any] ) -> int:
        """Re-run this file under torchrun with one process per visible GPU."""
        _lowercase : List[str] = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
if __name__ == "__main__":
    # Distributed smoke test: save/load Accelerator state and verify where the
    # optimizer tensors land (CPU vs device) for each map_location mode.
    # NOTE(review): every assignment below rebinds the single name `_A` while
    # later code reads names such as `savedir`, `model`, `optimizer`,
    # `accelerator` — the original variable names appear to have been lost;
    # restore them before running.
    _A : Union[str, Any] ='''/tmp/accelerate/state_checkpointing'''
    _A : Any =DummyModel()
    _A : int =torch.optim.Adam(params=model.parameters(), lr=1e-3)
    _A : str =torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    _A , _A : int =dummy_dataloaders()
    _A : Dict =ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    _A : Optional[Any] =Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    _A , _A , _A , _A , _A : Optional[Any] =accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    _A , _A : Any =accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        _A : List[Any] =group['''params'''][0].device
        break
    assert param_device.type == accelerator.device.type
    _A : Optional[Any] =model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
    for group in optimizer.param_groups:
        _A : Tuple =group['''params'''][0].device
        break
    assert (
        param_device.type == torch.device('''cpu''').type
    ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
    for group in optimizer.param_groups:
        _A : Optional[int] =group['''params'''][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
        accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 4 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __UpperCamelCase ( _lowercase ) -> List[Any]:
    """Materialise a fine-pruned model: apply each layer's learned pruning mask
    to its weights and save the resulting dense state dict.

    Embedding/LayerNorm/pooler/classifier/bias tensors are copied unchanged;
    other tensors are masked according to ``args.pruning_method``
    (``magnitude``, ``topK``, ``sigmoied_threshold`` or ``l0``).

    NOTE(review): locals are assigned to ``_lowercase`` but later read under
    names such as ``pruning_method``, ``model``, ``prefix_``, ``mask``,
    ``s_bar`` — the original local names appear to have been lost; restore
    them before running.
    """
    _lowercase : Tuple = args.pruning_method
    _lowercase : int = args.threshold
    _lowercase : str = args.model_name_or_path.rstrip('/' )
    _lowercase : Dict = args.target_model_path
    print(f'''Load fine-pruned model from {model_name_or_path}''' )
    _lowercase : str = torch.load(os.path.join(_lowercase, 'pytorch_model.bin' ) )
    _lowercase : List[Any] = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # These tensors are never pruned — copy them through verbatim.
            _lowercase : Optional[int] = tensor
            print(f'''Copied layer {name}''' )
        elif "classifier" in name or "qa_output" in name:
            _lowercase : List[str] = tensor
            print(f'''Copied layer {name}''' )
        elif "bias" in name:
            _lowercase : Dict = tensor
            print(f'''Copied layer {name}''' )
        else:
            if pruning_method == "magnitude":
                _lowercase : Union[str, Any] = MagnitudeBinarizer.apply(inputs=_lowercase, threshold=_lowercase )
                _lowercase : Optional[Any] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                # Strip the trailing "weight" to locate the matching mask tensor.
                _lowercase : Optional[Any] = name[:-6]
                _lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
                _lowercase : List[str] = TopKBinarizer.apply(_lowercase, _lowercase )
                _lowercase : str = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                _lowercase : str = name[:-6]
                _lowercase : Optional[Any] = model[f'''{prefix_}mask_scores''']
                _lowercase : str = ThresholdBinarizer.apply(_lowercase, _lowercase, _lowercase )
                _lowercase : Optional[int] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                _lowercase : Optional[int] = name[:-6]
                _lowercase : List[str] = model[f'''{prefix_}mask_scores''']
                # Hard-concrete stretch interval (l, r) used by L0 regularization.
                _lowercase , _lowercase : Union[str, Any] = -0.1, 1.1
                _lowercase : str = torch.sigmoid(_lowercase )
                _lowercase : int = s * (r - l) + l
                _lowercase : Optional[Any] = s_bar.clamp(min=0.0, max=1.0 )
                _lowercase : Union[str, Any] = tensor * mask
                print(f'''Pruned layer {name}''' )
            else:
                raise ValueError('Unknown pruning method' )
    if target_model_path is None:
        # Default output folder: "bertarized_<model folder>" next to the input.
        _lowercase : List[Any] = os.path.join(
            os.path.dirname(_lowercase ), f'''bertarized_{os.path.basename(_lowercase )}''' )
    if not os.path.isdir(_lowercase ):
        shutil.copytree(_lowercase, _lowercase )
        print(f'''\nCreated folder {target_model_path}''' )
    torch.save(_lowercase, os.path.join(_lowercase, 'pytorch_model.bin' ) )
    print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
    # CLI: select the pruning method/threshold and the input/output model folders.
    _A : Union[str, Any] =argparse.ArgumentParser()
    parser.add_argument(
        '''--pruning_method''',
        choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
        type=str,
        required=True,
        help=(
            '''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
            ''' sigmoied_threshold = Soft movement pruning)'''
        ),
    )
    parser.add_argument(
        '''--threshold''',
        type=float,
        required=False,
        help=(
            '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
            '''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
            '''Not needed for `l0`'''
        ),
    )
    parser.add_argument(
        '''--model_name_or_path''',
        type=str,
        required=True,
        help='''Folder containing the model that was previously fine-pruned''',
    )
    parser.add_argument(
        '''--target_model_path''',
        default=None,
        type=str,
        required=False,
        help='''Folder containing the model that was previously fine-pruned''',
    )
    # NOTE(review): the parser is bound to `_A` but used as `parser`, and the
    # entry point is called as `main` while defined as `__UpperCamelCase` —
    # the original names appear to have been lost; restore them before running.
    _A : List[Any] =parser.parse_args()
    main(args)
| 4 | 1 |
'''simple docstring'''
from collections import deque
class lowerCamelCase__ :
    """A schedulable process record used by the multi-level feedback queue."""
    def __init__( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> None:
        """Record the process name, its arrival time and its burst time."""
        _lowercase : Dict = process_name  # process name
        _lowercase : Dict = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        _lowercase : Optional[Any] = arrival_time
        _lowercase : int = burst_time  # remaining burst time
        _lowercase : Optional[Any] = 0  # total time of the process wait in ready queue
        _lowercase : Dict = 0  # time from arrival time to completion time
class lowerCamelCase__ :
    """Multi-level feedback queue scheduler.

    The first ``number_of_queues - 1`` levels run round-robin with their own
    time slices; the last level runs first-come-first-served.

    NOTE(review): ``__init__`` repeats the parameter name ``UpperCamelCase_``
    (a SyntaxError) and several bodies read names such as ``sequence`` /
    ``waiting_times`` that were assigned to ``_lowercase`` — the original
    names appear to have been lost; restore them before running.
    """
    def __init__( self : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : list[int] , UpperCamelCase_ : deque[Process] , UpperCamelCase_ : int , ) -> None:
        """Store the queue count, per-level time slices, ready queue and clock."""
        _lowercase : List[str] = number_of_queues
        # time slice of queues that round robin algorithm applied
        _lowercase : Optional[Any] = time_slices
        # unfinished process is in this ready_queue
        _lowercase : List[str] = queue
        # current time
        _lowercase : List[Any] = current_time
        # finished process is in this sequence queue
        _lowercase : deque[Process] = deque()
    def __UpperCAmelCase ( self : List[str] ) -> list[str]:
        """Return the names of the finished processes, in completion order."""
        _lowercase : Tuple = []
        for i in range(len(self.finish_queue ) ):
            sequence.append(self.finish_queue[i].process_name )
        return sequence
    def __UpperCAmelCase ( self : Any , UpperCamelCase_ : list[Process] ) -> list[int]:
        """Return the per-process waiting times of the given queue."""
        _lowercase : Optional[int] = []
        for i in range(len(UpperCamelCase_ ) ):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times
    def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : list[Process] ) -> list[int]:
        """Return the per-process turnaround times of the given queue."""
        _lowercase : Optional[Any] = []
        for i in range(len(UpperCamelCase_ ) ):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times
    def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : list[Process] ) -> list[int]:
        """Return the per-process completion (stop) times of the given queue."""
        _lowercase : Optional[Any] = []
        for i in range(len(UpperCamelCase_ ) ):
            completion_times.append(queue[i].stop_time )
        return completion_times
    def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : deque[Process] ) -> list[int]:
        """Return the remaining burst time of every process in the queue."""
        return [q.burst_time for q in queue]
    def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Process ) -> int:
        """Accumulate the time the process spent waiting since it last ran."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : deque[Process] ) -> deque[Process]:
        """Run first-come-first-served on the ready queue; finishes every process."""
        _lowercase : deque[Process] = deque()  # sequence deque of finished process
        while len(UpperCamelCase_ ) != 0:
            _lowercase : Dict = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(UpperCamelCase_ )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            _lowercase : Any = 0
            # set the process's turnaround time because it is finished
            _lowercase : Optional[Any] = self.current_time - cp.arrival_time
            # set the completion time
            _lowercase : Union[str, Any] = self.current_time
            # add the process to queue that has finished queue
            finished.append(UpperCamelCase_ )
        self.finish_queue.extend(UpperCamelCase_ )  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : deque[Process] , UpperCamelCase_ : int ) -> tuple[deque[Process], deque[Process]]:
        """Run one round-robin cycle with the given time slice.

        Processes that do not finish within the slice rejoin the ready queue.
        """
        _lowercase : deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(UpperCamelCase_ ) ):
            _lowercase : Any = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(UpperCamelCase_ )
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                _lowercase : List[str] = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(UpperCamelCase_ )
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                _lowercase : Dict = 0
                # set the finish time
                _lowercase : Union[str, Any] = self.current_time
                # update the process' turnaround time because it is finished
                _lowercase : str = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(UpperCamelCase_ )
        self.finish_queue.extend(UpperCamelCase_ )  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def __UpperCAmelCase ( self : Optional[int] ) -> deque[Process]:
        """Drive the full MLFQ: round-robin levels first, then FCFS at the bottom."""
        for i in range(self.number_of_queues - 1 ):
            _lowercase , _lowercase : List[Any] = self.round_robin(
                self.ready_queue , self.time_slices[i] )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue )
        return self.finish_queue
if __name__ == "__main__":
    import doctest

    # NOTE(review): every Process below is bound to the single name `Pa`, so
    # each assignment overwrites the previous one and the deques hold four
    # references to the same process — the original names (P1..P4) appear to
    # have been lost; restore them before running.
    _A : Any =Process('''P1''', 0, 5_3)
    _A : List[Any] =Process('''P2''', 0, 1_7)
    _A : Dict =Process('''P3''', 0, 6_8)
    _A : str =Process('''P4''', 0, 2_4)
    _A : List[str] =3
    _A : Optional[Any] =[1_7, 2_5]
    _A : Any =deque([Pa, Pa, Pa, Pa])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
    _A : List[Any] =Process('''P1''', 0, 5_3)
    _A : Any =Process('''P2''', 0, 1_7)
    _A : int =Process('''P3''', 0, 6_8)
    _A : Any =Process('''P4''', 0, 2_4)
    _A : List[Any] =3
    _A : List[Any] =[1_7, 2_5]
    _A : List[str] =deque([Pa, Pa, Pa, Pa])
    _A : Union[str, Any] =MLFQ(number_of_queues, time_slices, queue, 0)
    _A : str =mlfq.multi_level_feedback_queue()
    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        F'''waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        F'''completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        F'''turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
    )
    # print sequence of finished processes
    print(
        F'''sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}'''
    )
| 4 |
'''simple docstring'''
# 64-character standard Base64 alphabet (RFC 4648); a character's index is its 6-bit value.
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(_lowercase, _lowercase ):
_lowercase : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
_lowercase : int = ''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
_lowercase : Dict = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
_lowercase : Optional[Any] = B'=' * ((6 - len(_lowercase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
_lowercase : Optional[int] = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
for index in range(0, len(_lowercase ), 6 ) ).encode()
+ padding
)
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(_lowercase, _lowercase ) and not isinstance(_lowercase, _lowercase ):
_lowercase : int = (
'argument should be a bytes-like object or ASCII string, '
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase, _lowercase ):
try:
_lowercase : Optional[int] = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
_lowercase : Optional[int] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
_lowercase : str = encoded_data[:-padding]
_lowercase : Tuple = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
_lowercase : Union[str, Any] = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )
_lowercase : List[str] = [
int(binary_stream[index : index + 8], 2 )
for index in range(0, len(_lowercase ), 8 )
]
return bytes(_lowercase )
if __name__ == "__main__":
    # Run any doctests defined in this module's docstrings.
    import doctest
    doctest.testmod()
| 4 | 1 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase, _lowercase ) -> Union[str, Any]:
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(_lowercase, int(b / 2 ) ) * actual_power(_lowercase, int(b / 2 ) )
else:
return a * actual_power(_lowercase, int(b / 2 ) ) * actual_power(_lowercase, int(b / 2 ) )
def __UpperCamelCase ( _lowercase, _lowercase ) -> float:
if b < 0:
return 1 / actual_power(_lowercase, _lowercase )
return actual_power(_lowercase, _lowercase )
if __name__ == "__main__":
    # NOTE(review): `power` is not defined in this module (the function above
    # is named `__UpperCamelCase`) — confirm the intended callee.
    print(power(-2, -3))
| 4 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> bool:
return str(_lowercase ) == str(_lowercase )[::-1]
def __UpperCamelCase ( _lowercase ) -> int:
return int(_lowercase ) + int(str(_lowercase )[::-1] )
def __UpperCamelCase ( _lowercase = 1_0000 ) -> int:
_lowercase : List[str] = []
for num in range(1, _lowercase ):
_lowercase : Tuple = 0
_lowercase : Tuple = num
while iterations < 50:
_lowercase : Union[str, Any] = sum_reverse(_lowercase )
iterations += 1
if is_palindrome(_lowercase ):
break
else:
lychrel_nums.append(_lowercase )
return len(_lowercase )
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module (the counter above
    # is named `__UpperCamelCase`) — confirm the intended callee.
    print(F'''{solution() = }''')
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-module scaffolding for the DeBERTa subpackage: declare the import
# structure, gating the torch/TF/tokenizers pieces on their availability.
# NOTE(review): every structure below is rebound to the single name `_A`, so
# each assignment overwrites the previous one and the final _LazyModule call
# reads the undefined name `_import_structure` — the original variable names
# appear to have been lost; restore them before running.
_A : str ={
    '''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
    '''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Optional[int] =['''DebertaTokenizerFast''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Any =[
        '''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''DebertaForMaskedLM''',
        '''DebertaForQuestionAnswering''',
        '''DebertaForSequenceClassification''',
        '''DebertaForTokenClassification''',
        '''DebertaModel''',
        '''DebertaPreTrainedModel''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A : Union[str, Any] =[
        '''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFDebertaForMaskedLM''',
        '''TFDebertaForQuestionAnswering''',
        '''TFDebertaForSequenceClassification''',
        '''TFDebertaForTokenClassification''',
        '''TFDebertaModel''',
        '''TFDebertaPreTrainedModel''',
    ]
# Under static type checking, import everything eagerly so type checkers see
# the real symbols; at runtime, fall through to the lazy module instead.
if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )
else:
    import sys

    _A : Any =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
import argparse
from collections import defaultdict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int:
_lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowercase, 'r' ) as f:
_lowercase : Optional[int] = f.readlines()
_lowercase : Dict = f'''class {class_name}('''
_lowercase : List[Any] = f'''{4 * " "}def {test_name}('''
_lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}'''
_lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}'''
_lowercase : Dict = False
_lowercase : str = False
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = 0
_lowercase : Tuple = 0
_lowercase : Optional[int] = []
for line in lines:
if line.startswith(_lowercase ):
_lowercase : int = True
elif in_class and line.startswith(_lowercase ):
_lowercase : List[Any] = True
elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
_lowercase : str = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : List[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * " "}{correct_line}''' )
_lowercase : Any = False
else:
new_lines.append(_lowercase )
with open(_lowercase, 'w' ) as f:
for line in new_lines:
f.write(_lowercase )
def __UpperCamelCase(correct_filename, fail_filename=None):
    """Apply every correction listed in ``correct_filename``.

    Each line is ``file;class_name;test_name;correct_line``.  When
    ``fail_filename`` is given (one ``file::class::test`` id per line), only
    corrections whose test id appears in it are applied.
    """
    if fail_filename is not None:
        with open(fail_filename, 'r') as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename, 'r') as f:
        correct_lines = f.readlines()

    # Shared counter so repeated corrections for the same test hit
    # successive occurrences inside the file.
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(';')
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


# Restore the name used by the CLI entry point at the bottom of the file.
main = __UpperCamelCase
if __name__ == "__main__":
    # CLI entry point: --correct_filename holds semicolon-separated
    # file;class;test;correct_line records; --fail_filename optionally
    # restricts the rewrite to tests listed as failing.
    _A : str =argparse.ArgumentParser()
    parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
    parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
    _A : Union[str, Any] =parser.parse_args()
    # NOTE(review): as written, `parser`, `args` and `main` are never bound
    # under those names (assignments above target `_A`) — confirm the
    # intended entry-point wiring.
    main(args.correct_filename, args.fail_filename)
| 4 | 1 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __UpperCamelCase(dataset, expected_features):
    """Shared assertions for a Dataset read from the 4-line text fixture:
    4 rows, a single 'text' column, and the expected feature dtypes."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# Restore the helper name referenced by every test below.
_check_text_dataset = __UpperCamelCase


@pytest.mark.parametrize('keep_in_memory', [False, True])
def __UpperCamelCase(keep_in_memory, text_path, tmp_path):
    """Reading with keep_in_memory toggled yields the same dataset; memory
    growth is asserted accordingly."""
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


test_dataset_text_keep_in_memory = __UpperCamelCase


@pytest.mark.parametrize(
    'features',
    [
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ],
)
def __UpperCamelCase(features, text_path, tmp_path):
    """An explicit `features` schema (or None for the default) is honored."""
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


test_dataset_text_features = __UpperCamelCase


@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def __UpperCamelCase(split, text_path, tmp_path):
    """The requested split name is propagated to the resulting dataset."""
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


test_dataset_text_split = __UpperCamelCase


@pytest.mark.parametrize('path_type', [str, list])
def __UpperCamelCase(path_type, text_path, tmp_path):
    """A single path and a list of paths are both accepted."""
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


test_dataset_text_path_type = __UpperCamelCase
def __UpperCamelCase(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict read from the text fixture: every
    requested split has 4 rows, a single 'text' column, and the expected
    feature dtypes."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


# Restore the helper name referenced by every test below.
_check_text_datasetdict = __UpperCamelCase


@pytest.mark.parametrize('keep_in_memory', [False, True])
def __UpperCamelCase(keep_in_memory, text_path, tmp_path):
    """keep_in_memory toggled for the DatasetDict reader."""
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({'train': text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


test_datasetdict_text_keep_in_memory = __UpperCamelCase


@pytest.mark.parametrize(
    'features',
    [
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ],
)
def __UpperCamelCase(features, text_path, tmp_path):
    """An explicit `features` schema is honored by the DatasetDict reader."""
    cache_dir = tmp_path / 'cache'
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({'train': text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


test_datasetdict_text_features = __UpperCamelCase


@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def __UpperCamelCase(split, text_path, tmp_path):
    """A mapping of split name -> path produces the matching splits."""
    if split:
        path = {split: text_path}
    else:
        split = 'train'
        path = {'train': text_path, 'test': text_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


test_datasetdict_text_split = __UpperCamelCase
| 4 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCamelCase__(Pipeline):
    """Zero-shot image classification: score an image against free-form
    candidate text labels with a CLIP-style joint vision/text model.

    The four hooks below (`_sanitize_parameters`, `preprocess`, `_forward`,
    `postprocess`) implement the standard `Pipeline` contract; in the mangled
    original they all shared one name and shadowed each other.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, 'vision')
        # Pick the mapping matching the active framework.
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        """Classify `images` against the `candidate_labels` keyword argument;
        `hypothesis_template` customizes the per-label prompt."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # All user-facing kwargs feed the preprocess step; forward and
        # postprocess take none.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        # One hypothesis sentence per candidate label.
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        # assumes padding=True here — TODO confirm against upstream pipeline.
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''')

        # Highest score first.
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 4 | 1 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCamelCase__(tf.keras.layers.Layer):
    """In-graph GPT-2 tokenizer as a Keras layer, backed by keras-nlp's
    BytePairTokenizer.

    In the mangled original all five methods shared one name and shadowed
    each other; the upstream names are restored here.
    """

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None) -> Any:
        """Create the layer from an explicit vocab/merges pair.

        Args:
            vocab: token -> id mapping.
            merges: BPE merge rules, one "a b" string per merge.
            max_length: optional sequence length for truncation/padding.
            pad_token_id: id used to pad up to `max_length` (no padding if None).
        """
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPTaTokenizer, *args, **kwargs):
        """Build the layer from an existing (slow) GPT-2 tokenizer."""
        # Each bpe_ranks key is a pair of sub-tokens; join it into one rule.
        # (The mangled original joined the wrong variable here.)
        merges = [' '.join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        """Build the layer from a pretrained checkpoint name or path."""
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        """Keras deserialization hook."""
        return cls(**config)

    def get_config(self):
        """Keras serialization hook."""
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 4 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __UpperCamelCase(text: str) -> None:
    """Print the first-order entropy, the second-order entropy, and their
    difference for `text` (alphabet: space + lowercase letters).

    Values are printed as negated sums of p*log2(p), rounded to integers.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f'''{round(-1 * my_fir_sum ):.1f}''')

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    # (The mangled original reused one loop variable for both characters.)
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f'''{round(-1 * my_sec_sum ):.1f}''')

    # print the difference between them
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''')


# Restore the upstream name (referenced in the commented example in main()).
calculate_prob = __UpperCamelCase
def __UpperCamelCase(text: str) -> tuple[dict, dict]:
    """Count single characters and adjacent character pairs in `text`.

    Returns:
        (single_char_strings, two_char_strings): Counters of 1-grams and
        2-grams.  The last character is counted once up front, and a leading
        " " + first-character pair accounts for the start of the text.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


# Restore the name called by calculate_prob above.
analyze_text = __UpperCamelCase
def __UpperCamelCase():
    """Script entry point: run the module doctests.

    Example usage (kept from upstream):
        text = "Had repulsive dashwoods suspicion sincerity but advantage ..."
        calculate_prob(text)
    """
    import doctest

    doctest.testmod()


# Restore the name called by the __main__ guard below (the mangled file
# never bound `main`, so running the script raised NameError).
main = __UpperCamelCase
if __name__ == "__main__":
    # Script entry point; runs the module doctests.
    # NOTE(review): `main` is not bound in this file as written (the runner
    # above is defined as `__UpperCamelCase`) — confirm the intended name.
    main()
| 4 | 1 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_A : Optional[Any] =pytest.mark.integration
@require_faiss
class lowerCamelCase__(TestCase):
    """FAISS / ElasticSearch indexing behaviour on a small in-memory Dataset.

    The mangled original inherited from an undefined name and gave all six
    methods the same name (so only the last survived); upstream names are
    restored here.
    """

    def _create_dummy_dataset(self):
        # 30 rows: my_name-train_0 ... my_name-train_29.
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index('vecs', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')
        dset.drop_index('vecs')

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name='vecs', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT, )
        scores, examples = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name='vecs', metric_type=faiss.METRIC_INNER_PRODUCT, )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index('vecs', tmp_file.name)
            dset.load_faiss_index('vecs2', tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples('vecs2', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name='vecs')
        dset.drop_index('vecs')
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, 'vecs2', np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index('filename', es_client=es_client)
            scores, examples = dset.get_nearest_examples('filename', 'my_name-train_29')
            self.assertEqual(examples['filename'][0], 'my_name-train_29')


# Keep a discoverable binding: later classes in this module reuse the same
# mangled class name and would otherwise shadow this one.
IndexableDatasetTest = lowerCamelCase__
@require_faiss
class lowerCamelCase__(TestCase):
    """Unit tests for the standalone FaissIndex wrapper."""

    def test_faiss(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory='Flat')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory='LSH')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        # string_factory and custom_index are mutually exclusive.
        with self.assertRaises(ValueError):
            index = FaissIndex(string_factory='Flat', custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


# Keep a discoverable binding despite the shared mangled class name.
FaissIndexTest = lowerCamelCase__
@require_faiss
def __UpperCamelCase(mockfs):
    """Round-trip a FaissIndex through an fsspec mock filesystem."""
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = 'index.faiss'
    path = f'''mock://{index_name}'''
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


# Restore a pytest-collectable name for this test function.
test_serialization_fs = __UpperCamelCase
@require_elasticsearch
class lowerCamelCase__(TestCase):
    """Unit tests for ElasticSearchIndex with a fully mocked ES client."""

    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(['foo', 'bar', 'foobar'])

            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)


# Keep a discoverable binding despite the shared mangled class name.
ElasticSearchIndexTest = lowerCamelCase__
| 4 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__(unittest.TestCase):
    """Slow integration test: flax XLM-R base produces known hidden states."""

    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]])

        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 4 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase_ : str , UpperCamelCase_ : int=13 , UpperCamelCase_ : List[Any]=7 , UpperCamelCase_ : int=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Any=99 , UpperCamelCase_ : str=32 , UpperCamelCase_ : List[Any]=5 , UpperCamelCase_ : int=4 , UpperCamelCase_ : List[str]=37 , UpperCamelCase_ : List[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Any=16 , UpperCamelCase_ : str=2 , UpperCamelCase_ : List[Any]=0.02 , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Union[str, Any]="None" , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : Optional[int]=4 , UpperCamelCase_ : List[str]=None , ) -> str:
'''simple docstring'''
_lowercase : List[Any] = parent
_lowercase : int = batch_size
_lowercase : Any = seq_length
_lowercase : List[str] = is_training
_lowercase : Optional[Any] = use_input_mask
_lowercase : Any = use_token_type_ids
_lowercase : List[str] = use_labels
_lowercase : List[str] = vocab_size
_lowercase : Tuple = hidden_size
_lowercase : List[Any] = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : List[str] = intermediate_size
_lowercase : List[Any] = hidden_act
_lowercase : Any = hidden_dropout_prob
_lowercase : List[str] = attention_probs_dropout_prob
_lowercase : Dict = max_position_embeddings
_lowercase : Optional[Any] = type_vocab_size
_lowercase : str = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : Union[str, Any] = num_labels
_lowercase : Any = num_choices
_lowercase : Any = relative_attention
_lowercase : str = position_biased_input
_lowercase : str = pos_att_type
_lowercase : Union[str, Any] = scope
def __UpperCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
_lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Dict = None
if self.use_input_mask:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowercase : int = None
if self.use_token_type_ids:
_lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Tuple = None
_lowercase : Union[str, Any] = None
_lowercase : List[Any] = None
if self.use_labels:
_lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
'''simple docstring'''
_lowercase : str = self.get_config()
_lowercase : Union[str, Any] = 300
return config
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Any ) -> Tuple:
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str ) -> Any:
'''simple docstring'''
_lowercase : List[str] = DebertaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )[0]
_lowercase : List[str] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )[0]
_lowercase : List[str] = model(UpperCamelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Tuple = DebertaForMaskedLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
_lowercase : List[Any] = self.num_labels
_lowercase : Union[str, Any] = DebertaForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(UpperCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : str ) -> Dict:
'''simple docstring'''
_lowercase : str = self.num_labels
_lowercase : Tuple = DebertaForTokenClassification(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : int = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] ) -> Dict:
'''simple docstring'''
_lowercase : Dict = DebertaForQuestionAnswering(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowercase : Optional[Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
_lowercase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : int = config_and_inputs
_lowercase : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( A , unittest.TestCase ):
    """Common-suite tests for the Deberta model family.

    NOTE(review): the original base list was ``(A, A, unittest.TestCase)`` —
    a duplicate base class, which raises TypeError at class creation. The
    obfuscation collapsed two distinct tester mixins into the single name
    ``A``, so only one can be kept here; confirm the original mixin pair.
    """

    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # The five flags below were all mangled to ``A_`` (so only the last
    # assignment survived); restored to the attribute names the tester mixin
    # reads. NOTE(review): confirm the name/value pairing against upstream.
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        # Must be stored on ``self`` — the original bound these to throwaway
        # locals, so every test method failed on ``self.model_tester``.
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            # The original passed the undefined name ``UpperCamelCase_`` here.
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow integration tests against the real ``microsoft/deberta-base`` checkpoint.

    NOTE(review): both methods were mangled to the same name (the second
    silently shadowed the first) and the bodies referenced the undefined name
    ``UpperCamelCase_`` — standard unittest names and proper locals restored.
    """

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4), f"""{output[:, 1:4, 1:4]}""")
| 4 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# ``logger`` is used by the config classes below; the mangled dump bound it to
# the throwaway name ``_A``, leaving ``logger`` undefined at warning time.
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}

# Preserve the final value the mangled alias held, in case anything imports it.
_A = INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP
class lowerCamelCase__ ( A ):
    """Configuration for the InstructBLIP vision encoder (ViT-style tower).

    NOTE(review): ``__init__`` originally repeated the parameter name
    ``UpperCamelCase_`` eleven times (a SyntaxError); parameter names restored
    from the assignment order. The ``model_type`` attribute had been mangled
    to ``A_`` although ``from_pretrained`` reads ``cls.model_type``.
    """

    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this vision config, unwrapping it from a composite InstructBlip config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )

        return cls.from_dict(config_dict, **kwargs)
class lowerCamelCase__ ( A ):
    """Configuration for the InstructBLIP Q-Former (BERT-like cross-attention module).

    NOTE(review): parameter names restored — the mangled signature repeated
    ``UpperCamelCase_`` fifteen times (SyntaxError), and ``model_type`` had
    been renamed to ``A_`` although ``from_pretrained`` reads it.
    """

    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this Q-Former config, unwrapping it from a composite InstructBlip config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )

        return cls.from_dict(config_dict, **kwargs)
class lowerCamelCase__ ( A ):
    """Composite InstructBLIP configuration: vision tower + Q-Former + language model.

    NOTE(review): the two class attributes had been mangled to ``A_``
    (restored to ``model_type`` / ``is_composition``), and ``__init__``
    repeated the parameter name ``UpperCamelCase_`` (SyntaxError) while its
    body referenced the real names — both restored.
    """

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')

        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        # Default to an OPT language model when the sub-config does not say otherwise.
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        # Q-Former cross-attends over the vision tower's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """Build a composite config from the three already-instantiated sub-configs."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize to a plain dict, expanding the three nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 4 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
# ``logger`` is used inside DPTConfig below; the mangled dump bound it to the
# throwaway name ``_A``, leaving ``logger`` undefined at runtime.
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
    # See all DPT models at https://huggingface.co/models?filter=dpt
}

# Preserve the final value the mangled alias held, in case anything imports it.
_A = DPT_PRETRAINED_CONFIG_ARCHIVE_MAP
class lowerCamelCase__ ( A ):
    """Configuration for DPT (dense prediction transformer), including the hybrid BiT-backbone mode.

    NOTE(review): ``__init__`` originally repeated ``UpperCamelCase_`` for
    every parameter (SyntaxError) and the body referenced the real upstream
    names; parameter names restored from the default-value/assignment order.
    """

    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            # Hybrid mode runs a BiT convolutional backbone in front of the ViT.
            if backbone_config is None:
                logger.info('Initializing the config with a `BiT` backbone.')
                backbone_config = {
                    'global_padding': 'same',
                    'layer_type': 'bottleneck',
                    'depths': [3, 4, 9],
                    'out_features': ['stage1', 'stage2', 'stage3'],
                    'embedding_dynamic_padding': True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info('Initializing the config with a `BiT` backbone.')
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."""
                )

            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.')
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']')
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config when present."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 4 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework for `return_tensors=...` in the batch tests.
# The mangled dump bound this to ``_A`` while the tests reference ``FRAMEWORK``.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"

# Legacy alias for the mangled name, kept for backward compatibility.
_A = FRAMEWORK
class lowerCamelCase__ ( A , unittest.TestCase ):
    """ByT5 tokenizer test-suite (tokenizer-tester mixin + unittest).

    NOTE(review): the two fixture attributes were both mangled to ``A_``
    although the methods below read ``self.tokenizer_class`` and
    ``self.test_rust_tokenizer`` — names restored.
    """

    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
_lowercase : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
_lowercase : Dict = []
for i in range(len(UpperCamelCase_ ) ):
try:
_lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
_lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
if max_length is not None and len(UpperCamelCase_ ) > max_length:
_lowercase : List[Any] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
while len(UpperCamelCase_ ) < min_length:
_lowercase : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Dict = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
if " " not in output_txt and len(UpperCamelCase_ ) > 1:
_lowercase : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
)
if with_prefix_space:
_lowercase : Union[str, Any] = ' ' + output_txt
_lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : List[str] = self.ta_base_tokenizer
_lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.ta_base_tokenizer
_lowercase : Tuple = 'Unicode €.'
_lowercase : List[Any] = tokenizer(UpperCamelCase_ )
_lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
_lowercase : Any = tokenizer('e è é ê ë' )
_lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = self.ta_base_tokenizer
_lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
if FRAMEWORK != "jax":
_lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = self.ta_base_tokenizer
_lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase_ )
self.assertIn('attention_mask' , UpperCamelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.ta_base_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : str = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
_lowercase : str = self.ta_base_tokenizer
_lowercase : str = ['A long paragraph for summarization. </s>']
_lowercase : Optional[int] = ['Summary of the text. </s>']
# fmt: off
_lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowercase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[Any] = tempfile.mkdtemp()
_lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
_lowercase : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowercase : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : int = json.load(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(UpperCamelCase_ )
_lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
_lowercase : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : int = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __UpperCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
_lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
self.assertTrue(tokenizer.decode([255] ) == '' )
    def __UpperCAmelCase ( self : Optional[int] ) -> int:
        '''Deliberate no-op. NOTE(review): this was an override disabling a common tokenizer test that does not apply to ByT5, but the method name was mangled, so the override is currently inert; the original test name could not be recovered from this dump.'''
        pass
    def __UpperCAmelCase ( self : str ) -> Tuple:
        '''Deliberate no-op. NOTE(review): mangled override of a common test (ByT5 has no vocabulary file); currently inert because the original test name was lost.'''
        pass
    def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
        '''Deliberate no-op. NOTE(review): mangled override of a common test that ByT5 opts out of; currently inert because the original test name was lost.'''
        pass
    def __UpperCAmelCase ( self : List[Any] ) -> int:
        '''Deliberate no-op. NOTE(review): mangled override of a common test that ByT5 opts out of; currently inert because the original test name was lost.'''
        pass
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Optional[int] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowercase : Optional[int] = 0
_lowercase : int = tokenizer.convert_ids_to_tokens(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
for attr in attributes_list:
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A : Union[str, Any] ={
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[int] =[
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
_A : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
_A : Dict ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_A : Dict ={
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 4 | 1 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_A : Optional[Any] ='''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_A : Dict ='''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_A : List[Any] ='''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self : str ) -> List[str]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[str]=False ) -> List[str]:
'''simple docstring'''
if return_pvalue:
_lowercase : Tuple = pearsonr(UpperCamelCase_ , UpperCamelCase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0] )}
| 4 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __UpperCamelCase ( _lowercase ) -> Tuple:
_lowercase : int = torch.exp(_lowercase )
_lowercase : List[str] = torch.sum(_lowercase, dim=1 ) # sum of exp(x_i)
_lowercase : str = torch.sum(x * exp_x, dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowercase ) - B / A
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase_ : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_lowercase : int = config.output_attentions
_lowercase : int = config.output_hidden_states
_lowercase : Union[str, Any] = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : List[Any] = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(config.num_hidden_layers )] )
_lowercase : Tuple = [-1 for _ in range(config.num_hidden_layers )]
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : str ) -> int:
'''simple docstring'''
if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowercase : Optional[Any] = x
else:
_lowercase : Optional[int] = x
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
_lowercase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __UpperCAmelCase ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[Any]=None , ) -> Optional[int]:
'''simple docstring'''
_lowercase : int = ()
_lowercase : List[Any] = ()
_lowercase : Tuple = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowercase : Optional[int] = all_hidden_states + (hidden_states,)
_lowercase : str = layer_module(
UpperCamelCase_ , UpperCamelCase_ , head_mask[i] , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : List[str] = layer_outputs[0]
if self.output_attentions:
_lowercase : Tuple = all_attentions + (layer_outputs[1],)
_lowercase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowercase : str = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[int] = current_outputs + (all_attentions,)
_lowercase : List[Any] = self.highway[i](UpperCamelCase_ )
# logits, pooled_output
if not self.training:
_lowercase : Dict = highway_exit[0]
_lowercase : Tuple = entropy(UpperCamelCase_ )
_lowercase : Dict = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowercase : str = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowercase : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCamelCase_ , i + 1 )
else:
_lowercase : Optional[int] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowercase : str = all_hidden_states + (hidden_states,)
_lowercase : Optional[Any] = (hidden_states,)
if self.output_hidden_states:
_lowercase : Dict = outputs + (all_hidden_states,)
if self.output_attentions:
_lowercase : Optional[Any] = outputs + (all_attentions,)
_lowercase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , A , )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : int = config
_lowercase : int = BertEmbeddings(UpperCamelCase_ )
_lowercase : List[Any] = DeeBertEncoder(UpperCamelCase_ )
_lowercase : Any = BertPooler(UpperCamelCase_ )
self.init_weights()
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.embeddings.word_embeddings
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = value
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> Union[str, Any]:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ )
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , ) -> Union[str, Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowercase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowercase : Any = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowercase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowercase : Tuple = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if encoder_attention_mask is None:
_lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if token_type_ids is None:
_lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowercase : int = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowercase : int = encoder_attention_mask[:, None, None, :]
_lowercase : str = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowercase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -1_00_00.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
_lowercase : Dict = self.embeddings(
input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
_lowercase : List[Any] = self.encoder(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
_lowercase : int = encoder_outputs[0]
_lowercase : str = self.pooler(UpperCamelCase_ )
_lowercase : List[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Any = message
_lowercase : Dict = exit_layer # start from 1!
class lowerCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[str] ) -> Dict:
'''simple docstring'''
super().__init__()
_lowercase : Optional[Any] = BertPooler(UpperCamelCase_ )
_lowercase : List[Any] = nn.Dropout(config.hidden_dropout_prob )
_lowercase : int = nn.Linear(config.hidden_size , config.num_labels )
def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
_lowercase : str = encoder_outputs[0]
_lowercase : int = self.pooler(UpperCamelCase_ )
# "return" pooler_output
# BertModel
_lowercase : Optional[int] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowercase : Dict = bmodel_output[1]
_lowercase : Union[str, Any] = self.dropout(UpperCamelCase_ )
_lowercase : str = self.classifier(UpperCamelCase_ )
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , A , )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : List[Any] ) -> List[str]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : Dict = config.num_labels
_lowercase : Any = config.num_hidden_layers
_lowercase : Optional[int] = DeeBertModel(UpperCamelCase_ )
_lowercase : Any = nn.Dropout(config.hidden_dropout_prob )
_lowercase : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=-1 , UpperCamelCase_ : Union[str, Any]=False , ) -> Tuple:
'''simple docstring'''
_lowercase : Union[str, Any] = self.num_layers
try:
_lowercase : Tuple = self.bert(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowercase : List[Any] = outputs[1]
_lowercase : int = self.dropout(UpperCamelCase_ )
_lowercase : Optional[int] = self.classifier(UpperCamelCase_ )
_lowercase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowercase : Union[str, Any] = e.message
_lowercase : Any = e.exit_layer
_lowercase : Optional[int] = outputs[0]
if not self.training:
_lowercase : Union[str, Any] = entropy(UpperCamelCase_ )
_lowercase : Tuple = []
_lowercase : Tuple = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowercase : Tuple = MSELoss()
_lowercase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Union[str, Any] = CrossEntropyLoss()
_lowercase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowercase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowercase : Optional[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowercase : Union[str, Any] = MSELoss()
_lowercase : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowercase : Dict = CrossEntropyLoss()
_lowercase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase_ )
if train_highway:
_lowercase : List[str] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowercase : Optional[Any] = (loss,) + outputs
if not self.training:
_lowercase : List[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowercase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 4 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict =logging.get_logger(__name__)
_A : Dict ={
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """megatron-bert"""
def __init__( self : int , UpperCamelCase_ : int=2_9056 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[Any]=24 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=4096 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : Any=1E-12 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Any , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Dict = num_attention_heads
_lowercase : Dict = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[int] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Optional[Any] = initializer_range
_lowercase : List[str] = layer_norm_eps
_lowercase : List[Any] = position_embedding_type
_lowercase : Optional[Any] = use_cache
| 4 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int ) -> Any:
'''simple docstring'''
_lowercase : List[Any] = [10, 20, 30, 40, 50, 60]
_lowercase : Tuple = [2, 4, 6, 8, 10, 12]
_lowercase : Optional[Any] = 100
self.assertEqual(kp.calc_profit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , 210 )
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'Weight can not be negative.' )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'Profit can not be negative.' )
def __UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase_ , 'max_weight must greater than zero.' )
def __UpperCAmelCase ( self : int ) -> List[Any]:
'''simple docstring'''
self.assertRaisesRegex(
UpperCamelCase_ , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 4 | 1 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_A : int =(
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
_A : Union[str, Any] =(
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
_A : Union[str, Any] =(
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
_A : Union[str, Any] =(
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
_A : Dict =(
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
_A : int =(
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
_A : Optional[int] =(
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def __UpperCamelCase ( ) -> Dict:
_lowercase , _lowercase : Optional[Any] = randrange(len(_lowercase ) ), randrange(len(_lowercase ) )
_lowercase : List[Any] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
_lowercase , _lowercase : str = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __UpperCamelCase ( _lowercase = 100 ) -> Any:
return (generate_random_hand() for _ in range(_lowercase ))
@pytest.mark.parametrize('hand, expected', _lowercase )
def __UpperCamelCase ( _lowercase, _lowercase ) -> Union[str, Any]:
assert PokerHand(_lowercase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected', _lowercase )
def __UpperCamelCase ( _lowercase, _lowercase ) -> List[str]:
assert PokerHand(_lowercase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values', _lowercase )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Optional[Any]:
_lowercase : List[Any] = PokerHand(_lowercase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected', _lowercase )
def __UpperCamelCase ( _lowercase, _lowercase ) -> Union[str, Any]:
assert PokerHand(_lowercase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected', _lowercase )
def __UpperCamelCase ( _lowercase, _lowercase ) -> Union[str, Any]:
assert PokerHand(_lowercase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected', _lowercase )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> Dict:
assert PokerHand(_lowercase ).compare_with(PokerHand(_lowercase ) ) == expected
@pytest.mark.parametrize('hand, other, expected', generate_random_hands() )
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase ) -> str:
assert PokerHand(_lowercase ).compare_with(PokerHand(_lowercase ) ) == expected
def __UpperCamelCase ( ) -> Dict:
_lowercase : Optional[int] = [PokerHand(_lowercase ) for hand in SORTED_HANDS]
_lowercase : Tuple = poker_hands.copy()
shuffle(_lowercase )
_lowercase : Optional[int] = chain(sorted(_lowercase ) )
for index, hand in enumerate(_lowercase ):
assert hand == poker_hands[index]
def __UpperCamelCase ( ) -> Any:
# Test that five high straights are compared correctly.
_lowercase : List[Any] = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=_lowercase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __UpperCamelCase ( ) -> List[str]:
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
_lowercase : Tuple = PokerHand('2C 4S AS 3D 5C' )
_lowercase : Any = True
_lowercase : Optional[Any] = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __UpperCamelCase ( ) -> Any:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
_lowercase : Optional[int] = 0
_lowercase : int = os.path.abspath(os.path.dirname(_lowercase ) )
_lowercase : Optional[int] = os.path.join(_lowercase, 'poker_hands.txt' )
with open(_lowercase ) as file_hand:
for line in file_hand:
_lowercase : Optional[int] = line[:14].strip()
_lowercase : str = line[15:].strip()
_lowercase , _lowercase : str = PokerHand(_lowercase ), PokerHand(_lowercase )
_lowercase : Optional[int] = player.compare_with(_lowercase )
if output == "Win":
answer += 1
assert answer == 376
| 4 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the XLNet model family (the standard
# transformers `__init__.py` pattern): submodules are registered in
# `_import_structure` and only imported on first attribute access via
# `_LazyModule`.
# NOTE(review): the obfuscation had collapsed every binding to `_A`, so
# `_import_structure` (consumed by `_LazyModule` below) was never defined and
# the module was never swapped for the lazy proxy; the canonical bookkeeping
# is restored here.
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

# The slow (sentencepiece-backed) tokenizer is optional.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

# The fast (tokenizers-backed) tokenizer is optional.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

# PyTorch models are optional.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

# TensorFlow models are optional.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers (and IDEs) see the real imports.
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    # At runtime, replace this module with a lazy proxy that imports
    # submodules on demand.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4 | 1 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
# Generic type aliases shared across the package.
# NOTE(review): the obfuscation bound every alias to `_A`, which left `T`
# undefined at its own use sites below (NameError). The alias names are
# restored from the upstream `datasets.utils.typing` module — confirm there.
T = TypeVar("T")

# A homogeneous sequence of T: list or tuple.
ListLike = Union[List[T], Tuple[T, ...]]
# A value of T, or T nested one level inside a list or a str-keyed dict.
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
# Anything accepted as a filesystem path.
PathLike = Union[str, bytes, os.PathLike]
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (obfuscated name; note the rebinding of `_A` on the
# next statement immediately discards it).
_A : Optional[Any] =logging.get_logger(__name__)
# Checkpoint-name -> hosted config.json URL map.
# NOTE(review): presumably these were `logger` and
# `MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP` before obfuscation collapsed both
# to `_A` — confirm against upstream transformers.
_A : Optional[int] ={
    '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
    '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class lowerCamelCase__ ( A ):
    """Configuration class for MarkupLM (model_type ``"markuplm"``).

    Holds the usual transformer hyper-parameters plus the MarkupLM-specific
    xpath-embedding settings.

    NOTE(review): the obfuscation had renamed every ``__init__`` parameter to
    the same identifier (``UpperCamelCase_`` — a duplicate-argument
    SyntaxError) while the body read the real names (``vocab_size``,
    ``hidden_size``, ...) and stored results in throwaway locals. The
    parameter list is restored from the names the body reads; all default
    values are kept exactly as found, and the assignments now land on
    ``self`` as a config class requires.
    """

    # Original obfuscated class attribute kept as-is; presumably this is the
    # ``model_type`` identifier expected by the config registry — confirm.
    A_ = """markuplm"""

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special token ids are handled by the base config class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties (MarkupLM-specific xpath embeddings)
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 4 | 1 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
# Module-level logger (used by get_feature_extractor_config below).
# NOTE(review): the names `logger`, `FEATURE_EXTRACTOR_MAPPING_NAMES` and
# `FEATURE_EXTRACTOR_MAPPING` are restored — the obfuscation bound all three
# to `_A` even though later code in this file references them by name.
logger = logging.get_logger(__name__)

# model_type -> feature-extractor class name.
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

# Lazy mapping from config classes to feature-extractor classes.
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature-extractor class from its (string) class name.

    Looks first in the known per-model-type mapping, then in extractors that
    were registered at runtime, and finally falls back to the main
    ``transformers`` module so that missing-dependency dummy objects can
    produce a helpful error. Returns ``None`` when nothing matches.

    NOTE(review): the def name is restored to match the call site later in
    this file; the obfuscation had also collapsed the module/class-name
    locals into a single ``_lowercase`` (e.g. ``getattr(x, x)``).
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the feature-extractor configuration dict for a model.

    Resolves the feature-extractor JSON file from a local directory or the
    hub and returns it as a dict; returns ``{}`` when the file is absent so
    callers can fall back to the model config.

    NOTE(review): the obfuscation had given every parameter the same name
    (``_lowercase`` — a duplicate-argument SyntaxError); the parameter list
    is restored from the keyword arguments forwarded to
    ``get_file_from_repo`` below.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
        return {}

    with open(resolved_config_file, encoding='utf-8' ) as reader:
        return json.load(reader)
class lowerCamelCase__ :
    """Factory that instantiates the correct feature extractor for a model.

    Never instantiated directly — use ``from_pretrained``.

    NOTE(review): the obfuscation collapsed both method names to
    ``__UpperCAmelCase`` (so one shadowed the other), duplicated parameter
    names (a SyntaxError), and erased every local binding in
    ``from_pretrained``. The canonical AutoFeatureExtractor flow is restored
    below; the class name itself is left as found.
    """

    def __init__( self : Dict ) -> List[Any]:
        """Always raises — this class is a pure factory."""
        raise EnvironmentError(
            'AutoFeatureExtractor is designed to be instantiated '
            'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the feature extractor matching a checkpoint.

        Resolution order: an explicit ``feature_extractor_type`` in the
        feature-extractor config, an ``AutoFeatureExtractor`` entry in
        ``auto_map`` (remote code), the model config, and finally the static
        config-class -> extractor-class mapping.
        """
        config = kwargs.pop('config' , None )
        trust_remote_code = kwargs.pop('trust_remote_code' , None )
        # Mark the call as coming from an Auto class (affects telemetry/warnings).
        kwargs['_from_auto'] = True

        config_dict , _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path , **kwargs )
        feature_extractor_class = config_dict.get('feature_extractor_type' , None )
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config , 'feature_extractor_type' , None )
            if hasattr(config , 'auto_map' ) and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class )

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop('code_revision' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config )]
            return feature_extractor_class.from_dict(config_dict , **kwargs )

        raise ValueError(
            F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
            F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
            F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new (config class, feature-extractor class) pair."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class )
| 4 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url):
    """Build a SwinaSRConfig matching the variant encoded in the checkpoint URL.

    NOTE(review): the obfuscated original assigned every variant-specific
    value to a throwaway local, so the returned config was always the
    default. The assignments are restored onto ``config``; attribute names
    follow what the rest of this script reads (``config.upsampler``,
    ``config.embed_dim``, ``config.num_channels``) and the upstream
    conversion script for the rest — confirm there.
    """
    config = SwinaSRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''

    return config
def rename_key(name, config):
    """Translate one original Swin2SR checkpoint key into its HF equivalent.

    NOTE(review): the obfuscated original stored every ``.replace()`` result
    in a throwaway local and returned ``name`` unchanged — a no-op. The
    in-place rebinding of ``name`` is restored; the substring checks and the
    replacement strings are kept exactly as found. The helper name
    ``rename_key`` follows the HF conversion-script convention (the
    obfuscation had erased the definition's name) — confirm upstream.
    """
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' )
    if "layers" in name:
        name = name.replace('layers', 'encoder.stages' )
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks', 'layers' )
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn', 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense' )
    if "q_bias" in name:
        name = name.replace('q_bias', 'query.bias' )
    if "k_bias" in name:
        name = name.replace('k_bias', 'key.bias' )
    if "v_bias" in name:
        name = name.replace('v_bias', 'value.bias' )
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp', 'continuous_position_bias_mlp' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'patch_embed.projection' )

    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'

    if "conv_first" in name:
        name = name.replace('conv_first', 'first_convolution' )

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last', 'final_convolution' )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0', 'conv_before_upsample' )
            if "upsample.0" in name:
                name = name.replace('upsample.0', 'upsample.convolution_0' )
            if "upsample.2" in name:
                name = name.replace('upsample.2', 'upsample.convolution_1' )
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight', 'upsample.conv.weight' )
            name = name.replace('upsample.0.bias', 'upsample.conv.bias' )
        else:
            pass
    else:
        name = 'swin2sr.' + name

    return name


def convert_state_dict(orig_state_dict, config):
    """Rewrite an original state dict in place so its keys match the HF model.

    Fused ``qkv`` projections are split into separate query/key/value
    tensors (each of ``config.embed_dim`` rows); every other key is renamed
    via ``rename_key``.

    NOTE(review): the obfuscated original popped each key and then dropped
    the value into a throwaway local, emptying the state dict; the
    re-insertions (including the per-stage/per-block q/k/v target keys) are
    restored.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )

        if "qkv" in key:
            key_split = key.split('.' )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight'''
                ] = val[:dim, :]
                orig_state_dict[
                    f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight'''
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight'''
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias'''
                ] = val[:dim]
                orig_state_dict[
                    f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias'''
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'''swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias'''
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config )] = val

    return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Download an original Swin2SR checkpoint, convert it to the HF format,
    verify a forward pass against known output slices, and optionally save
    and/or push the result.

    NOTE(review): the obfuscation had given all three parameters the same
    name (a SyntaxError) and erased every local binding (``model``,
    ``processor``, ``outputs``, ``expected_shape``, ``url_to_name``, ...)
    even though later lines read those names; all bindings are restored.
    """
    config = get_config(checkpoint_url)
    model = SwinaSRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu' )
    new_state_dict = convert_state_dict(state_dict, config)
    # strict=False: positional-embedding buffers are recomputed by the HF model.
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f'''Unexpected key {key} in state_dict''' )

    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url, stream=True ).raw ).convert('RGB' )
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )

    # The Jpeg-artifact model works on a single (grayscale) channel.
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )

    outputs = model(pixel_values )

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1E-3 )
    print('Looks ok!' )

    url_to_name = {
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
            'swin2SR-classical-sr-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
            'swin2SR-classical-sr-x4-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
            'swin2SR-compressed-sr-x4-48'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
            'swin2SR-lightweight-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
            'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        model.push_to_hub(f'''caidas/{model_name}''' )
        processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
    # NOTE(review): `parser` and `args` were bound to the obfuscated name
    # `_A`, leaving both undefined at their use sites below; restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
        type=str,
        help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')

    args = parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 1 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
# i.e. it lets `importlib.import_module("tests.models....")` below resolve.
sys.path.append('''.''')
def get_module_path(test_file):
    """Convert a model test file path into its dotted module path.

    e.g. ``tests/models/bert/test_modeling_bert.py`` ->
    ``tests.models.bert.test_modeling_bert``.

    Raises ValueError when the path is not under ``tests/models/``, is not a
    ``.py`` file, or is not named ``test_modeling_*``.

    NOTE(review): restored the name used by the callers below and the
    ``test_file``/``components`` bindings the body already reads (the
    obfuscation had redirected them into ``_lowercase``).
    """
    components = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            f'''{test_file} instead.''' )
    test_fn = components[-1]
    if not test_fn.endswith('py' ):
        raise ValueError(f'''`test_file` should be a python file. Got {test_fn} instead.''' )
    if not test_fn.startswith('test_modeling_' ):
        raise ValueError(
            f'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
    components = components[:-1] + [test_fn.replace('.py', '' )]
    test_module_path = '.'.join(components )
    return test_module_path
# NOTE(review): every definition below had been renamed to `__UpperCamelCase`
# (each shadowing the previous one) while the call sites inside this file use
# the real names (`get_test_module`, `get_test_classes`, ...); the names and
# the broken locals/lambdas are restored.
def get_test_module(test_file):
    """Import and return the module object for a model test file."""
    test_module_path = get_module_path(test_file )
    test_module = importlib.import_module(test_module_path )
    return test_module


def get_tester_classes(test_file):
    """Return all ``*ModelTester`` classes defined in a test file."""
    tester_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith('ModelTester' ):
            tester_classes.append(getattr(test_module, attr ) )
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__ )


def get_test_classes(test_file):
    """Return all test classes in a test file (those covering model classes)."""
    test_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        test_class = getattr(test_module, attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, 'all_model_classes', [] )
        if len(model_classes ) > 0:
            test_classes.append(test_class )
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__ )


def get_model_classes(test_file):
    """Return the distinct model classes covered by any test class."""
    test_classes = get_test_classes(test_file )
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__ )


def get_model_tester_from_test_class(test_class):
    """Instantiate a test class and return its model tester's class (or None)."""
    test = test_class()
    if hasattr(test, 'setUp' ):
        test.setUp()
    model_tester = None
    if hasattr(test, 'model_tester' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in `test_file` that cover `model_class`."""
    test_classes = get_test_classes(test_file )
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__ )


def get_tester_classes_for_model(test_file, model_class):
    """Return the model-tester classes behind the tests covering `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class )
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__ )


def get_test_to_tester_mapping(test_file):
    """Map each test class in a test file to its model-tester class."""
    test_classes = get_test_classes(test_file )
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class to the test classes that cover it."""
    model_classes = get_model_classes(test_file )
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class ) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class to the model-tester classes that cover it."""
    model_classes = get_model_classes(test_file )
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """Recursively replace classes by their names so the result is JSON-serializable.

    Strings pass through, classes become their ``__name__``, lists/tuples and
    dicts are converted element-wise, and anything else is returned unchanged.

    NOTE(review): the recursion already called ``to_json`` by name (so the
    def name is grounded); the broken recursive arguments and ``isinstance``
    targets the obfuscation erased are restored.
    """
    if isinstance(o, str ):
        return o
    elif isinstance(o, type ):
        return o.__name__
    elif isinstance(o, (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o, dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
| 4 |
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    """Fully justify the words of *word* into lines of exactly *max_width* chars.

    Words are packed greedily; extra spaces are distributed left-to-right
    between words, and the final line is left-justified.

    NOTE(review): the obfuscation had given both parameters the same name
    (a duplicate-argument SyntaxError) while the body read ``word`` and
    ``max_width``; the signature and broken locals are restored. The
    function name follows the upstream TheAlgorithms implementation — no
    caller is visible in this file.

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )

    answer = []
    line: list = []
    width = 0
    for inner_word in words:
        if width + len(inner_word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word )
            width += len(inner_word )
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width ) )
            # reset new line and new width
            line , width = [inner_word], len(inner_word )
    remaining_spaces = max_width - width - len(line )
    answer.append(' '.join(line ) + (remaining_spaces + 1) * ' ' )
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 4 | 1 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
# Module-level logger (bound to the obfuscated name `_A`; not referenced below).
_A : Any =logging.get_logger(__name__)
@add_end_docstrings(A )
class lowerCamelCase__ ( A ):
    """Depth-estimation pipeline: takes image(s) and returns the model's raw
    predicted-depth tensor plus a PIL grayscale depth visualization.

    NOTE(review): the obfuscation had renamed all four hook methods to the
    same mangled name (so each shadowed the previous one), duplicated the
    ``__init__`` parameter names (a SyntaxError), and broke the
    ``self.image_size`` handoff from preprocess to postprocess (which the
    postprocess body reads). The standard Pipeline hook names
    (`_sanitize_parameters`, `preprocess`, `_forward`, `postprocess`) are
    restored — confirm against upstream transformers.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs )
        requires_backends(self, 'vision' )
        # Only depth-estimation model heads are valid for this pipeline.
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """Run depth estimation on one image (or a list of images)."""
        return super().__call__(images, **kwargs )

    def _sanitize_parameters(self, **kwargs):
        # No preprocess/forward/postprocess parameters are supported.
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image )
        # Remember the original (width, height) so postprocess can upsample
        # the depth map back to the input resolution.
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework )
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # PIL size is (width, height); interpolate expects (height, width).
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ), size=self.image_size[::-1], mode='bicubic', align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        # Normalize to 0-255 uint8 for a viewable grayscale image.
        formatted = (output * 255 / np.max(output )).astype('uint8' )
        depth = Image.fromarray(formatted )

        output_dict = {}
        output_dict['predicted_depth'] = predicted_depth
        output_dict['depth'] = depth
        return output_dict
| 4 |
'''simple docstring'''
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield paths of ``.py``/``.ipynb`` files under *top_dir*, skipping
    ``__init__.py`` files and pruning ``scripts`` plus hidden/private
    directories (names starting with ``.`` or ``_``).

    NOTE(review): restored the name the caller below uses, the in-place
    ``dir_names[:]`` prune (the obfuscation discarded the filtered list, so
    nothing was pruned), and the ``filename`` bindings the splitext/join
    calls read.
    """
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        # Assign in place so os.walk does not descend into pruned directories.
        dir_names[:] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename ).lstrip('./' )
def md_prefix(i: int) -> str:
    """Markdown prefix for nesting depth *i*: a fresh ``##`` header at depth
    0, an indented bullet otherwise.

    NOTE(review): the body read ``i`` while the obfuscated parameter was
    named ``_lowercase``; the parameter is restored.
    """
    return f'''{i * " "}*''' if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    """Print headers/bullets for the components of *new_path* that differ
    from *old_path*, then return *new_path* as the new current path.

    NOTE(review): restored the two distinct parameters (the obfuscation had
    duplicated the name) and the ``old_parts`` binding read below.
    """
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(f'''{md_prefix(i )} {new_part.replace("_", " " ).title()}''' )
    return new_path
def print_directory_md(top_dir: str = ".") -> None:
    """Print a markdown index (headings + links) of code files under *top_dir*.

    Fixes the obfuscated version, which printed the literal string
    "(unknown)" instead of the file name in both the link text and the URL,
    and computed the link title without using it.
    """
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        # Nesting depth: one level per directory component (0 at the root).
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(' ', '%20')
        title = os.path.splitext(filename.replace('_', ' ').title())[0]
        print(f"{md_prefix(indent)} [{title}]({url})")
if __name__ == "__main__":
    # Script entry point: emit the markdown index for the current directory.
    print_directory_md('''.''')
| 4 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase__(DiffusionPipeline):
    """Unconditional image-generation pipeline using Karras et al. (2022)
    stochastic sampling (KarrasVeScheduler) with a UNet denoiser.

    The obfuscated version inherited from an undefined name and referenced
    undefined ``A__`` parameters; this restores the canonical wiring.
    """

    # Components registered by __init__ (annotations only; values are set
    # via register_modules).
    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        num_inference_steps=50,
        generator=None,
        output_type="pil",
        return_dict=True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Generate ``batch_size`` images.

        Returns an ImagePipelineOutput (or a 1-tuple when
        ``return_dict=False``) of PIL images or numpy arrays.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output['derivative'],
                )
            sample = step_output.prev_sample

        # Map from [-1, 1] to [0, 1] and move channels last for image output.
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import machinery: map submodule name -> exported symbols. The
# obfuscated version bound every piece to a throwaway `_A`, so the
# `_import_structure` handed to _LazyModule was never defined and the lazy
# module was never installed in sys.modules.
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}

# Slow tokenizer needs sentencepiece.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']

# Fast tokenizer needs the tokenizers library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']

# Modeling code needs torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_reformer'] = [
        'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ReformerAttention',
        'ReformerForMaskedLM',
        'ReformerForQuestionAnswering',
        'ReformerForSequenceClassification',
        'ReformerLayer',
        'ReformerModel',
        'ReformerModelWithLMHead',
        'ReformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File names the tokenizer expects inside a checkpoint directory.
# (The obfuscated version bound all four module constants to `_A`, while the
# tokenizer class below reads them by their canonical names.)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

# Checkpoint name -> hosted vocab/merges URLs.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
        'allenai/longformer-large-4096': (
            'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
        ),
        'allenai/longformer-large-4096-finetuned-triviaqa': (
            'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
        ),
        'allenai/longformer-base-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
        ),
        'allenai/longformer-large-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
        ),
    },
    'merges_file': {
        'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
        'allenai/longformer-large-4096': (
            'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
        ),
        'allenai/longformer-large-4096-finetuned-triviaqa': (
            'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
        ),
        'allenai/longformer-base-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
        ),
        'allenai/longformer-large-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
        ),
    },
}

# Maximum input length (in tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/longformer-base-4096': 4_0_9_6,
    'allenai/longformer-large-4096': 4_0_9_6,
    'allenai/longformer-large-4096-finetuned-triviaqa': 4_0_9_6,
    'allenai/longformer-base-4096-extra.pos.embd.only': 4_0_9_6,
    'allenai/longformer-large-4096-extra.pos.embd.only': 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a bijective mapping from every byte value (0-255) to a unicode
    character that is safe to use in BPE vocab files.

    Printable bytes map to themselves; the remaining bytes are remapped to
    code points starting at 256 so no byte maps to whitespace/control
    characters. (The obfuscated version never bound ``cs``/``n`` and so
    raised NameError.)
    """
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            # Shift unprintable bytes past the latin-1 range.
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    *word* is a sequence of symbols (strings of variable length once merges
    have been applied). The obfuscated version never bound ``pairs`` or
    ``prev_char`` and raised NameError; the def is renamed to match its call
    sites in ``bpe``.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowerCamelCase__(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for Longformer (same scheme as RoBERTa/GPT-2).

    Restores the canonical implementation: the obfuscated version inherited
    from an undefined base, gave all class attributes and all twelve methods
    the same name (so only the last survived), and never assigned any
    instance state that the method bodies read.
    """

    # Class-level hooks read by PreTrainedTokenizer machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        """Load the JSON vocab and merge rules and set up the BPE state."""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        # Lower rank == earlier (higher priority) merge.
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the learned merge rules to *token* and return the merged
        symbols joined by spaces. Results are memoized in ``self.cache``."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the highest-priority (lowest-rank) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unk token."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) back to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Decode a list of BPE tokens back into a plain string."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into *save_directory*.

        Returns the two file paths.
        """
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a1=None):
        """Add special tokens: <s> X </s> for one sequence, or
        <s> A </s></s> B </s> for a pair."""
        if token_ids_a1 is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a1 + sep

    def get_special_tokens_mask(self, token_ids_a, token_ids_a1=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a1, already_has_special_tokens=True)
        if token_ids_a1 is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a1=None):
        """Longformer (like RoBERTa) does not use token type ids: all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a1 is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is BPE-merged like
        any mid-sentence word."""
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
| 701 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """Config/input factory used by the Flax RoFormer model tests.

    Renamed to match its instantiation site; the obfuscated version stored
    every constructor argument into a throwaway local instead of ``self``
    and gave both helper methods the same name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Build a small RoFormerConfig plus random model inputs."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() into the common test dict."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model-tester suite for RoFormer.

    Fixes: the mixin base was the undefined name ``A`` (should be the
    imported FlaxModelTesterMixin), and ``setUp`` plus the slow test shared
    one method name, so ``setUp`` was never registered and the test method
    was never discovered by unittest.
    """

    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Smoke-test that each head loads from the PyTorch checkpoint and runs.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('junnyu/roformer_chinese_small', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    """Integration test against the published Chinese RoFormer checkpoint.

    The method is renamed to ``test_*`` so unittest discovery actually runs
    it (the obfuscated name was never picked up), and the class no longer
    shadows the other test classes in this module.
    """

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 5_0000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        # Golden logits for the first 3 positions / first 3 vocab entries.
        expected_slice = jnp.array(
            [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 4 | 0 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Drop references to *objects* and flush the accelerator cache.

    Returns the list of now-None placeholders so callers can rebind their
    variables (``a, b = release_memory(a, b)``). The obfuscated version
    referenced the undefined ``_SCREAMING_SNAKE_CASE`` everywhere and never
    actually cleared anything.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    # Flush whichever accelerator backend is present.
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    """Return True if *exception* is an out-of-memory error worth retrying
    with a smaller batch size.

    Matches the known CUDA/cuDNN/CPU-allocator OOM messages; only a
    single-argument RuntimeError qualifies. (The obfuscated version compared
    against undefined names.)
    """
    _statements = [
        'CUDA out of memory.',  # CUDA OOM
        'cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.',  # CUDNN SNAFU
        'DefaultCPUAllocator: can\'t allocate memory',  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function=None, starting_batch_size=128):
    """Decorator that retries *function* with a halved batch size on OOM.

    The wrapped function must take the batch size as its first positional
    argument; it is supplied by the decorator, not by the caller. Usable
    bare or with arguments (``@find_executable_batch_size(starting_batch_size=N)``).
    The obfuscated version referenced undefined names and never passed the
    batch size through.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        # Start each search from a clean cache.
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ', '.join([f'''{arg}={value}''' for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
                f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''')
        while True:
            if batch_size == 0:
                raise RuntimeError('No executable batch size found, reached zero.')
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    # OOM: flush caches and halve the batch size before retrying.
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
| 702 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_A : Optional[int] =logging.get_logger(__name__)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""input_features""", """is_longer"""]
def __init__( self : List[Any] , UpperCamelCase_ : List[Any]=64 , UpperCamelCase_ : int=4_8000 , UpperCamelCase_ : Union[str, Any]=480 , UpperCamelCase_ : Any=10 , UpperCamelCase_ : Optional[int]=1024 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 1_4000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , )
_lowercase : Tuple = top_db
_lowercase : Any = truncation
_lowercase : str = padding
_lowercase : int = fft_window_size
_lowercase : Any = (fft_window_size >> 1) + 1
_lowercase : int = hop_length
_lowercase : Any = max_length_s
_lowercase : str = max_length_s * sampling_rate
_lowercase : Any = sampling_rate
_lowercase : List[Any] = frequency_min
_lowercase : Tuple = frequency_max
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale='htk' , )
_lowercase : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self : Tuple ) -> Dict[str, Any]:
'''simple docstring'''
_lowercase : Tuple = copy.deepcopy(self.__dict__ )
_lowercase : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
_lowercase : List[str] = spectrogram(
UpperCamelCase_ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel='dB' , )
return log_mel_spectrogram.T
def __UpperCAmelCase ( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowercase : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : Union[str, Any] = [0]
# randomly choose index for each part
_lowercase : Tuple = np.random.choice(ranges[0] )
_lowercase : int = np.random.choice(ranges[1] )
_lowercase : Any = np.random.choice(ranges[2] )
_lowercase : int = mel[idx_front : idx_front + chunk_frames, :]
_lowercase : int = mel[idx_middle : idx_middle + chunk_frames, :]
_lowercase : Tuple = mel[idx_back : idx_back + chunk_frames, :]
_lowercase : List[Any] = torch.tensor(mel[None, None, :] )
_lowercase : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase_ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=UpperCamelCase_ )
_lowercase : str = mel_shrink[0][0].numpy()
_lowercase : int = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __UpperCAmelCase ( self : List[str] , UpperCamelCase_ : np.array , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowercase : Tuple = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowercase : Any = len(UpperCamelCase_ ) - max_length
_lowercase : Dict = np.random.randint(0 , overflow + 1 )
_lowercase : Optional[int] = waveform[idx : idx + max_length]
_lowercase : Dict = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_lowercase : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowercase : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
_lowercase : List[Any] = False
else:
_lowercase : Union[str, Any] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowercase : int = True
else:
raise NotImplementedError(F'''data_truncating {truncation} not implemented''' )
else:
_lowercase : Any = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowercase : List[Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowercase : Union[str, Any] = int(max_length / len(UpperCamelCase_ ) )
_lowercase : Union[str, Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_ ) )
_lowercase : Dict = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_lowercase : str = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters )
_lowercase : Dict = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowercase : List[Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
    def __call__( self : Union[str, Any] , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Dict , ) -> BatchFeature:
        '''
        Featurize one audio clip or a batch of clips into log-mel `input_features`
        plus a per-clip `is_longer` flag, returned wrapped in a `BatchFeature`.

        NOTE(review): identifiers in this block appear machine-mangled — every
        parameter is named `UpperCamelCase_` and every local `_lowercase`, while
        later lines read names such as `raw_speech`, `truncation`, `padding`,
        `input_mel` and `is_longer` that are never assigned.  The block cannot
        run as written; restore the original names before relying on it.
        '''
        # Fall back to the extractor's configured truncation/padding strategies.
        _lowercase : Dict = truncation if truncation is not None else self.truncation
        _lowercase : int = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        # Normalise the input into a list of float32 numpy arrays (a batch).
        _lowercase : Optional[Any] = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        _lowercase : List[str] = is_batched_numpy or (
            isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            _lowercase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ):
            _lowercase : Any = np.asarray(UpperCamelCase_ , dtype=np.floataa )
        elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            _lowercase : Tuple = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            _lowercase : int = [np.asarray(UpperCamelCase_ )]
        # convert to mel spectrogram, truncate and pad if needed.
        _lowercase : Optional[Any] = [
            self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_ )
            for waveform in raw_speech
        ]
        _lowercase : List[Any] = []
        _lowercase : Dict = []
        for mel, longer in padded_inputs:
            input_mel.append(UpperCamelCase_ )
            is_longer.append(UpperCamelCase_ )
        if truncation == "fusion" and sum(UpperCamelCase_ ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            _lowercase : Optional[Any] = np.random.randint(0 , len(UpperCamelCase_ ) )
            _lowercase : str = True
        if isinstance(input_mel[0] , UpperCamelCase_ ):
            _lowercase : str = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in input_mel]
        # is_longer is a list of bool
        _lowercase : Tuple = [[longer] for longer in is_longer]
        _lowercase : Optional[Any] = {'input_features': input_mel, 'is_longer': is_longer}
        _lowercase : Optional[int] = BatchFeature(UpperCamelCase_ )
        if return_tensors is not None:
            _lowercase : List[Any] = input_features.convert_to_tensors(UpperCamelCase_ )
        return input_features
| 4 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    """Minimal ``KwargsHandler`` used to exercise ``to_kwargs()``.

    Field names and defaults are fixed by the assertions below, which
    construct ``MockClass(a=2, b=True, c=2.25)`` and expect ``to_kwargs()``
    to return only the fields that differ from their defaults.
    """

    a: int = 0
    b: bool = False
    c: float = 3.0
class lowerCamelCase__ ( unittest.TestCase ):
    '''
    Tests for accelerate's kwargs handlers: ``KwargsHandler.to_kwargs``,
    ``GradScalerKwargs`` (fp16 grad-scaler configuration) and
    ``DistributedDataParallelKwargs`` (exercised via the torchrun entry point
    at the bottom of this file).

    NOTE(review): identifiers look machine-mangled — the three test methods
    share the name ``__UpperCAmelCase`` (so only the last survives), and
    ``A_`` / ``scaler_handler`` / ``accelerator`` / ``scaler`` / ``cmd`` are
    referenced without ever being assigned.  Restore the original names
    before running.
    '''

    def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
        '''to_kwargs() should contain only fields that differ from their defaults.'''
        self.assertDictEqual(MockClass().to_kwargs() , {} )
        self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
        self.assertDictEqual(MockClass(a=2 , b=A_ ).to_kwargs() , {'a': 2, 'b': True} )
        self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )

    @require_cuda
    def __UpperCAmelCase ( self : Optional[Any] ) -> int:
        '''GradScalerKwargs values must reach torch's GradScaler; others stay default.'''
        _lowercase : Optional[int] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        _lowercase : Tuple = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fpaa )
        _lowercase : Union[str, Any] = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 10_24.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5 )
        self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , A_ )

    @require_multi_gpu
    def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
        '''Re-launch this file under torchrun to exercise the DDP kwargs path below.'''
        _lowercase : List[str] = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        execute_subprocess_async(A_ , env=os.environ.copy() )
if __name__ == "__main__":
    # Smoke test executed under torchrun (see the @require_multi_gpu test
    # above): verify DistributedDataParallelKwargs values reach the DDP-wrapped
    # model, collecting all mismatches before raising.
    # NOTE(review): identifiers look machine-mangled — the handler/model are
    # bound to `_A` but later read as `ddp_scaler` / `model` / `error_msg` /
    # `observed_bucket_cap_map`, which are never assigned.  Restore the
    # original names before running.
    _A : Optional[Any] =DistributedDataParallelKwargs(bucket_cap_mb=1_5, find_unused_parameters=True)
    _A : Optional[Any] =Accelerator(kwargs_handlers=[ddp_scaler])
    _A : List[str] =torch.nn.Linear(1_0_0, 2_0_0)
    _A : Optional[Any] =accelerator.prepare(model)
    # Check the values changed in kwargs
    _A : Dict =""
    _A : Any =model.bucket_bytes_cap // (1_0_2_4 * 1_0_2_4)
    if observed_bucket_cap_map != 1_5:
        error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
    # Check the values of the defaults
    if model.dim != 0:
        error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 703 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: int) -> dict:
    """Fetch one Hacker News item by id from the public Firebase API.

    :param story_id: numeric Hacker News item id
    :return: decoded JSON payload for the item
    """
    # Name restored: callers below reference ``get_hackernews_story``, but the
    # def had been mangled to ``__UpperCamelCase`` (shadowed by later defs).
    url = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Return details of the current top stories (at most ``max_stories``).

    :param max_stories: number of story ids to fetch details for
    :return: list of decoded story payloads
    """
    # Name restored: referenced by ``hackernews_top_stories_as_markdown`` below.
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as a Markdown bullet list of links.

    :param max_stories: number of stories to include
    :return: newline-joined ``* [title](url)`` lines
    """
    # Name restored: the __main__ guard below calls this function.
    stories = hackernews_top_stories(max_stories)
    return "\n".join('* [{title}]({url})'.format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
_A : Dict =list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve ``matrix @ x = vector`` by Gaussian elimination with partial
    pivoting, returning the solution as a column matrix.

    Each entry is rounded to 10 decimal places to suppress float noise.

    >>> solve([[1, 0], [0, 1]], [[2], [3]])
    [[2.0], [3.0]]
    """
    size: int = len(matrix)
    # Build the augmented matrix [matrix | vector].
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # Partial pivoting: pick the remaining row with the largest |entry|
        # in this column to improve numerical stability.
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        # Eliminate this column from all rows below the pivot.
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # Back substitution.
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Return the minimal-degree polynomial through the points
    (1, y_points[0]), (2, y_points[1]), ... — Project Euler 101's OP(k, n).

    The returned callable evaluates the polynomial with integer-rounded
    coefficients, so its outputs are exact ints.
    """
    size: int = len(y_points)
    # Vandermonde-style system: row i encodes x = i + 1.
    coefficient_matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            coefficient_matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(coefficient_matrix, vector)

    def interpolated_func(var: int) -> int:
        # Evaluate with rounded coefficients (solve() returns floats).
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """Project Euler 101 generating function:
    u(n) = 1 - n + n^2 - n^3 + ... + n^10.
    """
    # Name restored: ``solution`` below uses this as its default ``func``.
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Project Euler 101: sum of the first incorrect terms (FITs) of the
    optimum polynomials fitting successively longer prefixes of ``func``.

    :param func: sequence generator (defaults to the problem's polynomial)
    :param order: degree bound / number of data points to fit
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    for poly in polynomials:
        # Find the first x where the fitted polynomial disagrees with func.
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 704 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and (empty) pretrained-config map; both names had been
# mangled to ``_A`` — restored to the conventional transformers names.
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__ ( PretrainedConfig ):
    '''
    Configuration for a Megatron-BERT model.  Defaults match the released
    megatron-bert checkpoints (29056 vocab, 1024 hidden, 24 layers, 16 heads).

    NOTE(review): the class name looks machine-mangled; the original is
    presumably ``MegatronBertConfig`` — confirm against callers before
    renaming.  The base class (mangled to ``A``) and the duplicated
    ``UpperCamelCase_`` parameters (a SyntaxError) have been restored from
    the attribute names the body itself assigns.
    '''

    # Registry key used by transformers' AutoConfig machinery.
    model_type = """megatron-bert"""

    def __init__(
        self,
        vocab_size=2_9056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        '''Store the architecture hyper-parameters; extra kwargs go to PretrainedConfig.'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A directed edge; in this module weights are restricted to 0 or 1.

    Name and fields restored: the graph class below constructs
    ``Edge(to_vertex, weight)`` and reads ``.destination_vertex`` / ``.weight``.
    """

    destination_vertex: int
    weight: int
class lowerCamelCase__ :
    """Directed graph over ``size`` vertices whose edge weights are all 0 or 1,
    with a 0-1 BFS (deque-based) shortest-path query.

    NOTE(review): the class name looks machine-mangled (original likely
    ``AdjacencyList``); method names were restored from their uses
    (``self.size``, ``self[vertex]``).
    """

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        # Iterate the outgoing edges of ``vertex``.
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        """Add a directed edge; raises ValueError for bad weight or vertex."""
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """0-1 BFS: weight-0 edges go to the front of the deque, weight-1 to
        the back, so vertices are settled in nondecreasing distance order.

        :raises ValueError: when ``finish_vertex`` is unreachable
        """
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 705 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """Materialise a fine-pruned (movement/magnitude-pruned) checkpoint into a
    standard dense ``pytorch_model.bin`` by applying each layer's binary mask.

    ``args`` must provide ``pruning_method``, ``threshold``,
    ``model_name_or_path`` and ``target_model_path`` (see the argparse
    definitions at the bottom of this file).  The name ``main`` and all locals
    were restored from the call site and the right-hand-side references.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('/')
    target_model_path = args.target_model_path
    print(f'''Load fine-pruned model from {model_name_or_path}''')
    model = torch.load(os.path.join(model_name_or_path, 'pytorch_model.bin'))
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # These layers are never pruned; copy them through unchanged.
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''')
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''')
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''')
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''')
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''')
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''')
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                # Hard-concrete stretch interval used during L0 training.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''')
            else:
                raise ValueError('Unknown pruning method')
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f'''bertarized_{os.path.basename(model_name_or_path)}''')
    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f'''\nCreated folder {target_model_path}''')
    torch.save(pruned_model, os.path.join(target_model_path, 'pytorch_model.bin'))
    print('\nPruned model saved! See you later!')
if __name__ == "__main__":
_A : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
_A : List[Any] =parser.parse_args()
main(args)
| 4 | 0 |
'''simple docstring'''
def prefix_function(input_string: str) -> list:
    """Compute the KMP prefix function of ``input_string``.

    ``result[i]`` is the length of the longest proper prefix of
    ``input_string[: i + 1]`` that is also a suffix of it.
    Runs in O(n) via dynamic programming on earlier results.
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_str: str) -> int:
    """Length of the longest proper prefix of ``input_str`` that also occurs
    as a suffix of some prefix (the maximum of the KMP prefix function).

    Raises ValueError for the empty string (``max`` of an empty sequence).
    """
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 706 |
'''simple docstring'''
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def base64_encode(data: bytes) -> bytes:
    """Encode ``data`` to base64 (RFC 4648), returning bytes.

    >>> base64_encode(b"Hello World!")
    b'SGVsbG8gV29ybGQh'

    :raises TypeError: if ``data`` is not a bytes-like object
    """
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    # Local copy of the standard alphabet so this function is self-contained
    # (the module-level constant's name was mangled).
    b64_charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to
        # make its length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            b64_charset[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decode base64 ``encoded_data`` (str or ASCII bytes) back to bytes.

    >>> base64_decode("SGVsbG8gV29ybGQh")
    b'Hello World!'

    :raises TypeError: if ``encoded_data`` is neither str nor bytes
    :raises ValueError: if bytes input is not pure ASCII
    """
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode('utf-8')
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters')

    # Local alphabet so this function is self-contained (module constant mangled).
    b64_charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'

    padding = encoded_data.count('=')

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in b64_charset for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in b64_charset for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        # Each '=' hid two zero bits appended during encoding; strip them.
        binary_stream = "".join(
            bin(b64_charset.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(b64_charset.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    """A binary-tree node holding ``data`` and ``left``/``right`` children.

    Name restored: the functions below construct ``TreeNode(...)`` and read
    ``.data`` / ``.left`` / ``.right``.
    """

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    """Interactively build a binary tree level by level from stdin.

    Entering "n" (or nothing) for a child stops input and returns the tree.
    The trailing bare ``raise`` is unreachable in practice: the loop only
    exits via one of the two returns.
    """
    print('\n********Press N to stop entering at any point of time********\n')
    check = input('Enter the value of the root node: ').strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f'''Enter the left node of {node_found.data}: '''
        check = input(msg).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f'''Enter the right node of {node_found.data}: '''
        check = input(msg).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise
def pre_order(node: TreeNode) -> None:
    """Print the tree rooted at ``node`` in root-left-right order, comma-separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=',')
    pre_order(node.left)
    pre_order(node.right)
def in_order(node: TreeNode) -> None:
    """Print the tree rooted at ``node`` in left-root-right order, comma-separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=',')
    in_order(node.right)
def post_order(node: TreeNode) -> None:
    """Print the tree rooted at ``node`` in left-right-root order, comma-separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=',')
def level_order(node: TreeNode) -> None:
    """Print the tree breadth-first (all levels on one line), comma-separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=',')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    """Print the tree breadth-first with each level on its own line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        # Drain the current level, buffering the next level's nodes.
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=',')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=',')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=',')
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks (reverse of
    root-right-left gives left-right-root)."""
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stackb = [], []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stackb.append(n)
    while stackb:  # pop up from stack2 will be the post order
        print(stackb.pop().data, end=',')
def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    """Return ``s`` centred in a ``width``-wide banner of ``char`` characters
    (or a newline followed by a bare rule when ``s`` is empty)."""
    if not s:
        return "\n" + width * char
    # Split the remaining width (minus the two spaces around s) in half;
    # any odd leftover goes to the right side.
    left, extra = divmod(width - len(s) - 2, 2)
    return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Interactive demo: build a tree from stdin, then print every traversal.
    # NOTE(review): identifiers below look machine-mangled — the tree returned
    # by build_tree() is bound to `_A`, but every traversal call then reads
    # `node`, which is never assigned.  Restore `node = build_tree()` before
    # running this demo.
    print(prompt('''Binary Tree Traversals'''))
    _A : Optional[int] =build_tree()
    print(prompt('''Pre Order Traversal'''))
    pre_order(node)
    print(prompt() + '''\n''')
    print(prompt('''In Order Traversal'''))
    in_order(node)
    print(prompt() + '''\n''')
    print(prompt('''Post Order Traversal'''))
    post_order(node)
    print(prompt() + '''\n''')
    print(prompt('''Level Order Traversal'''))
    level_order(node)
    print(prompt() + '''\n''')
    print(prompt('''Actual Level Order Traversal'''))
    level_order_actual(node)
    print('''*''' * 5_0 + '''\n''')
    print(prompt('''Pre Order Traversal - Iteration Version'''))
    pre_order_iter(node)
    print(prompt() + '''\n''')
    print(prompt('''In Order Traversal - Iteration Version'''))
    in_order_iter(node)
    print(prompt() + '''\n''')
    print(prompt('''Post Order Traversal - Iteration Version'''))
    post_order_iter(node)
    print(prompt())
| 707 |
'''simple docstring'''
def is_palindrome(n: int) -> bool:
    """Return True when the decimal digits of ``n`` read the same reversed.

    Name restored: ``solution`` below calls ``is_palindrome``.
    """
    return str(n) == str(n)[::-1]
def sum_reverse(n: int) -> int:
    """Return ``n`` plus its digit-reversal (one reverse-and-add step).

    Name restored: ``solution`` below calls ``sum_reverse``.
    """
    return int(n) + int(str(n)[::-1])
def solution(limit: int = 1_0000) -> int:
    """Project Euler 55: count candidate Lychrel numbers below ``limit`` —
    numbers that do not reach a palindrome within 50 reverse-and-add steps.

    :param limit: exclusive upper bound on the numbers tested
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            # Never hit a palindrome within 50 steps: candidate Lychrel number.
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 4 | 0 |
'''simple docstring'''
_A : Tuple ='0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 708 |
'''simple docstring'''
import argparse
from collections import defaultdict
def __UpperCamelCase ( _lowercase, _lowercase, _lowercase, _lowercase, _lowercase ) -> int:
_lowercase : Optional[int] = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowercase, 'r' ) as f:
_lowercase : Optional[int] = f.readlines()
_lowercase : Dict = f'''class {class_name}('''
_lowercase : List[Any] = f'''{4 * " "}def {test_name}('''
_lowercase : List[str] = f'''{8 * " "}{correct_line.split()[0]}'''
_lowercase : List[str] = f'''{16 * " "}{correct_line.split()[0]}'''
_lowercase : Dict = False
_lowercase : str = False
_lowercase : List[Any] = False
_lowercase : Union[str, Any] = False
_lowercase : Any = 0
_lowercase : Tuple = 0
_lowercase : Optional[int] = []
for line in lines:
if line.startswith(_lowercase ):
_lowercase : int = True
elif in_class and line.startswith(_lowercase ):
_lowercase : List[Any] = True
elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
_lowercase : str = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowercase : List[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowercase : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * " "}{correct_line}''' )
_lowercase : Any = False
else:
new_lines.append(_lowercase )
with open(_lowercase, 'w' ) as f:
for line in new_lines:
f.write(_lowercase )
def __UpperCamelCase(correct, fail=None) -> None:
    """Apply every correction listed in ``correct`` (semicolon-separated
    ``file;class;test;correct_line`` records), optionally restricted to the
    ``file::class::test`` ids listed in the ``fail`` file.

    NOTE(review): the original declared both parameters as ``_lowercase``
    (duplicate argument names — a SyntaxError); the second parameter's name
    ``fail`` is pinned by the ``if fail is not None`` check in the body.
    """
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            # NOTE(review): `overwrite_file` is not defined under that name in
            # this file (the sibling helper was renamed) — confirm the target.
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    # CLI entry point: rewrite expected test slices listed in --correct_filename,
    # optionally restricted to the failures recorded in --fail_filename.
    _A : str =argparse.ArgumentParser()
    # NOTE(review): the parser is bound to `_A` above, so `parser` (and later
    # `args` and `main`) are undefined as written — this guard raises NameError
    # at runtime; confirm the intended names.
    parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
    parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
    _A : Union[str, Any] =parser.parse_args()
    main(args.correct_filename, args.fail_filename)
| 4 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : str =logging.get_logger(__name__)
_A : Optional[int] ={
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class lowerCamelCase__ ( PretrainedConfig ):
    """Configuration class for SEW (Squeezed and Efficient Wav2Vec) speech models.

    Stores hyper-parameters for the convolutional feature extractor, the
    squeezed transformer encoder, SpecAugment masking, and the CTC /
    sequence-classification heads.

    NOTE(review): restored from a garbled original in which every ``__init__``
    parameter was named ``UpperCamelCase_`` (duplicate argument names — a
    SyntaxError) and values were bound to throwaway locals instead of ``self``
    attributes.  Parameter names are recovered from the right-hand sides of the
    original assignments; defaults are kept exactly as written.
    """

    # Identifier used by the auto-config machinery.
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Feature-extractor and transformer-encoder hyper-parameters.
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect.'
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
                F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
                F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        """Product of the conv strides: audio samples consumed per logit frame."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 709 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_A : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class lowerCamelCase__ ( Pipeline ):
    """Zero-shot image classification pipeline.

    Scores caller-supplied text ``candidate_labels`` against an image with a
    CLIP-style model and returns ``{"score", "label"}`` dicts sorted best-first.

    NOTE(review): restored from a garbled original in which every method was
    named ``__UpperCAmelCase`` (so later defs shadowed earlier ones), several
    signatures declared duplicate parameter names (a SyntaxError), the sort key
    was ``lambda UpperCamelCase_: -x[0]`` (NameError on ``x``), and the
    ``UserDict``/``kwargs`` references were broken.  The canonical ``Pipeline``
    hook names are restored.
    """

    def __init__(self, **kwargs):
        """Require the vision backend and validate the model supports this task."""
        super().__init__(**kwargs)
        requires_backends(self, 'vision')
        # Pick the framework-specific model mapping to validate against.
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        """Classify ``images``; pass ``candidate_labels`` (and optionally
        ``hypothesis_template``) as keyword arguments."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        """Route supported kwargs to the (preprocess, forward, postprocess) steps."""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        """Build model inputs: the processed image plus one tokenized
        hypothesis sentence per candidate label."""
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs['text_inputs'] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        """Run the model; keep the labels alongside the image-text logits."""
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        return {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }

    def postprocess(self, model_outputs):
        """Convert logits to per-label probabilities, sorted best-first."""
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''')
        # Fixed sort key: the original lambda referenced an undefined name.
        return [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
| 4 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
# Make torch/cudnn kernels deterministic so the hard-coded output slices
# asserted in the tests below are reproducible across runs.
enable_full_determinism()
class lowerCamelCase__ ( PipelineTesterMixin , unittest.TestCase ):
    """Fast tests for ``KandinskyVaaPriorPipeline``.

    NOTE(review): restored from a garbled original in which every method was
    named ``__UpperCAmelCase`` (later defs shadowed earlier ones), all class
    attributes were named ``A_``, one signature declared duplicate parameters
    (a SyntaxError) and many locals/arguments were replaced by the undefined
    name ``__A``.  Names are reconstructed from the internal ``self.<name>``
    references that survived garbling (e.g. ``self.dummy_prior``,
    ``self.get_dummy_components``) and the upstream diffusers test-suite.
    """

    pipeline_class = KandinskyVaaPriorPipeline
    params = ["""prompt"""]
    batch_params = ["""prompt""", """negative_prompt"""]
    required_optional_params = [
        """num_images_per_prompt""",
        """generator""",
        """num_inference_steps""",
        """latents""",
        """negative_prompt""",
        """guidance_scale""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        """Tiny CLIP tokenizer used by all dummy components."""
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        """Deterministically-seeded tiny CLIP text encoder."""
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        """Deterministically-seeded tiny prior transformer."""
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        """Deterministically-seeded tiny CLIP vision encoder."""
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], resample=3, size=224, )
        return image_processor

    def get_dummy_components(self):
        """Assemble the kwargs dict used to instantiate the pipeline under test."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log', prediction_type='sample', num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Seeded call kwargs; MPS needs a CPU-seeded generator."""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        """Image embeds match the recorded reference slice (dict and tuple returns)."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
| 710 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __UpperCamelCase ( _lowercase ) -> None:
    """Print the 1-gram Shannon entropy, the 2-gram entropy, and their
    difference for the text ``_lowercase`` (one value per line, 1 decimal).

    NOTE(review): ``analyze_text`` is the intended sibling helper; in this
    garbled file it is defined under a different name — confirm wiring.
    Fixed here: the garbled original discarded the first Counter
    (``_lowercase, _lowercase = ...``), called the nonexistent ``math.loga``,
    and shadowed the outer pair-loop variable so only doubled characters
    ('aa', 'bb', ...) were ever counted.
    """
    single_char_strings, two_char_strings = analyze_text(_lowercase)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f'''{round(-1 * my_fir_sum ):.1f}''')
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f'''{round(-1 * my_sec_sum ):.1f}''')
    # print the difference between them
    print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''')
def __UpperCamelCase ( _lowercase ) -> tuple[dict, dict]:
    """Count single characters and adjacent character pairs in ``_lowercase``.

    Returns ``(single_char_strings, two_char_strings)`` as Counters.  A
    leading-space pair is counted for the first character, and the last
    character is counted once so every character appears exactly once.
    An empty input now yields two empty Counters instead of raising
    IndexError on ``_lowercase[-1]``.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    if not _lowercase:
        # Guard: the indexing below would raise IndexError on an empty string.
        return single_char_strings, two_char_strings
    single_char_strings[_lowercase[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + _lowercase[0]] += 1
    for i in range(0, len(_lowercase) - 1):
        single_char_strings[_lowercase[i]] += 1
        two_char_strings[_lowercase[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def __UpperCamelCase ( ) -> None:
    """Run the module's doctests.

    Fixed: the original annotated the return type as ``List[Any]`` although
    the function always returns ``None``; the long commented-out sample
    corpus was condensed to a short usage note.
    """
    import doctest

    doctest.testmod()
    # To analyse a sample corpus manually, call calculate_prob(text) with any
    # reasonably long string and compare the three printed entropy values.
| 4 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_A : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCamelCase ( _lowercase ) -> Any:
    """Deprecated helper: convert a PIL image, a list of PIL images/tensors, or
    a tensor into a float tensor normalized to [-1, 1], NCHW, with sides
    rounded down to a multiple of 8.  Tensors are returned unchanged.

    Fixed here: the garbled original never bound ``w``/``h`` (NameError) and
    passed an undefined name as the warning category.
    """
    warnings.warn(
        'The preprocess method is deprecated and will be removed in a future version. Please'
        ' use VaeImageProcessor.preprocess instead', FutureWarning, )
    image = _lowercase
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 2_5_5.0
        image = image.transpose(0, 3, 1, 2)
        # Rescale [0, 1] -> [-1, 1] as expected by the diffusion UNet.
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def __UpperCamelCase ( _lowercase ):
    """Deprecated helper: convert a PIL mask (or list of masks/tensors) into a
    binary float tensor, grayscale, with sides rounded down to a multiple of
    32.  Tensors are returned unchanged.

    Fixed here: the garbled original never bound ``w``/``h`` (NameError) and
    lost the 0/1 thresholding assignments (``mask[mask < 0.5] = 0`` etc. had
    become bare locals).
    """
    mask = _lowercase
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('L').resize((w, h), resample=PIL_INTERPOLATION['nearest']))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 2_5_5.0
        # Binarize: anything below 0.5 is repainted (0), the rest is kept (1).
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class lowerCamelCase__ ( DiffusionPipeline ):
    """Inpainting pipeline using RePaint (https://arxiv.org/abs/2201.09865).

    NOTE(review): restored from a garbled original whose base class was the
    undefined name ``UpperCamelCase_``, whose module annotations had become
    ``A_ = 42``, whose ``__init__``/``__call__`` declared duplicate parameter
    names (a SyntaxError), and which dropped the ``self.scheduler.eta``
    assignment target.  ``DiffusionPipeline`` and the scheduler/unet names are
    recovered from the imports above and the surviving keyword arguments.
    """

    unet: UNetaDModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        """Register the denoising UNet and the RePaint scheduler."""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image,
        mask_image,
        num_inference_steps = 250,
        eta = 0.0,
        jump_length = 10,
        jump_n_sample = 10,
        generator = None,
        output_type = "pil",
        return_dict = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run RePaint inpainting; in ``mask_image`` a value of 1 keeps the
        pixel and 0 marks it for repainting."""
        original_image = image
        # NOTE(review): `_preprocess_image`/`_preprocess_mask` are the intended
        # module helpers; in this garbled file they are defined under another
        # name — confirm the wiring.
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        # Timesteps run high -> low; start t_last above the first step so the
        # forward branch is taken on the first iteration.
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t  (RePaint resampling jump)
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 711 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow integration test: checks the last-dim slice of
    ``FlaxXLMRobertaModel``'s hidden states against recorded reference values
    for a fixed sentence.

    NOTE(review): the garbled original referenced the undefined name
    ``UpperCamelCase_`` for the encoded text, the expected shape and the
    expected values; distinct local names are restored here.
    """

    @slow
    def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
        """Encode a fixed sentence and compare shape and a value slice."""
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]])
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 4 | 0 |
"""Lazy-import module for the MLuke tokenizer (requires sentencepiece)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Map of submodule name -> exported names, consumed by _LazyModule below.
# Fixed here: the garbled original never defined `_import_structure` (NameError
# at import), never registered the exported names, and bound the _LazyModule to
# a throwaway variable instead of installing it in sys.modules.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the heavy tokenizer import is
    # deferred until first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 712 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A : int =logging.get_logger(__name__)
_A : Union[str, Any] ={
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class lowerCamelCase__ ( PretrainedConfig ):
    """Configuration for the InstructBLIP vision encoder.

    NOTE(review): restored from a garbled original in which every ``__init__``
    parameter was named ``UpperCamelCase_`` (duplicate argument names — a
    SyntaxError) and values were bound to throwaway locals instead of ``self``
    attributes; parameter names are recovered from the original right-hand
    sides and defaults are kept exactly as written.
    """

    # Identifier used by the auto-config machinery.
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1E-6,
        attention_dropout=0.0,
        initializer_range=1E-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the config; unwraps the vision sub-config when given a full
        InstructBlipConfig dict."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict, **kwargs)
class lowerCamelCase__ ( PretrainedConfig ):
    """Configuration for the InstructBLIP Q-Former (querying transformer).

    NOTE(review): restored from a garbled original in which every ``__init__``
    parameter was named ``UpperCamelCase_`` (duplicate argument names — a
    SyntaxError) and values were bound to throwaway locals instead of ``self``
    attributes; parameter names are recovered from the original right-hand
    sides and defaults are kept exactly as written.
    """

    # Identifier used by the auto-config machinery.
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # How often cross-attention to the vision encoder is inserted.
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the config; unwraps the Q-Former sub-config when given a full
        InstructBlipConfig dict."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict, **kwargs)
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """instructblip"""
A_ = True
def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase_ )
if vision_config is None:
_lowercase : Any = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
_lowercase : List[Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
_lowercase : List[Any] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ )
_lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : int = self.text_config.is_encoder_decoder
_lowercase : Tuple = num_query_tokens
_lowercase : str = self.vision_config.hidden_size
_lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : List[Any] = 1.0
_lowercase : int = 0.02
@classmethod
def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = copy.deepcopy(self.__dict__ )
_lowercase : Optional[int] = self.vision_config.to_dict()
_lowercase : Optional[Any] = self.qformer_config.to_dict()
_lowercase : Tuple = self.text_config.to_dict()
_lowercase : Dict = self.__class__.model_type
return output
| 4 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = ["""image_processor""", """tokenizer"""]
A_ = """CLIPImageProcessor"""
A_ = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : List[Any] , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , **UpperCamelCase_ : int ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
_lowercase : Optional[Any] = kwargs.pop('feature_extractor' )
_lowercase : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : List[str] , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : int=None , **UpperCamelCase_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowercase : Tuple = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if images is not None:
_lowercase : Any = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and images is not None:
_lowercase : int = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def __UpperCAmelCase ( self : List[Any] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def __UpperCAmelCase ( self : str , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[str] ) -> Dict:
'''simple docstring'''
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_lowercase : Optional[Any] = self.tokenizer.model_input_names
_lowercase : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 713 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A : List[str] ='''pt'''
elif is_tf_available():
_A : Tuple ='''tf'''
else:
_A : Optional[int] ='''jax'''
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = ByTaTokenizer
A_ = False
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
_lowercase : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
_lowercase : Dict = []
for i in range(len(UpperCamelCase_ ) ):
try:
_lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
_lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
if max_length is not None and len(UpperCamelCase_ ) > max_length:
_lowercase : List[Any] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
while len(UpperCamelCase_ ) < min_length:
_lowercase : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Dict = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
if " " not in output_txt and len(UpperCamelCase_ ) > 1:
_lowercase : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
)
if with_prefix_space:
_lowercase : Union[str, Any] = ' ' + output_txt
_lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : List[str] = self.ta_base_tokenizer
_lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.ta_base_tokenizer
_lowercase : Tuple = 'Unicode €.'
_lowercase : List[Any] = tokenizer(UpperCamelCase_ )
_lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
_lowercase : Any = tokenizer('e è é ê ë' )
_lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = self.ta_base_tokenizer
_lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
if FRAMEWORK != "jax":
_lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = self.ta_base_tokenizer
_lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase_ )
self.assertIn('attention_mask' , UpperCamelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.ta_base_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : str = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
_lowercase : str = self.ta_base_tokenizer
_lowercase : str = ['A long paragraph for summarization. </s>']
_lowercase : Optional[int] = ['Summary of the text. </s>']
# fmt: off
_lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowercase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[Any] = tempfile.mkdtemp()
_lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
_lowercase : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowercase : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : int = json.load(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(UpperCamelCase_ )
_lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
_lowercase : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : int = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __UpperCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
_lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Optional[int] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowercase : Optional[int] = 0
_lowercase : int = tokenizer.convert_ids_to_tokens(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
for attr in attributes_list:
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , attr + '_id' , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(getattr(UpperCamelCase_ , attr + '_id' ) , UpperCamelCase_ )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [] )
setattr(UpperCamelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 4 | 0 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
_A : Union[str, Any] =False
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
'''simple docstring'''
_lowercase : int = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_lowercase : Optional[Any] = """A painting of a squirrel eating a burger """
_lowercase : List[str] = torch.manual_seed(0 )
_lowercase : Tuple = pipe(
prompt=_UpperCamelCase , generator=_UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCamelCase )
_lowercase : Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_lowercase : List[Any] = generator.manual_seed(0 )
_lowercase : str = pipe(
prompt=_UpperCamelCase , generator=_UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __UpperCAmelCase ( self : str ) -> str:
'''simple docstring'''
_lowercase : Dict = VersatileDiffusionTextToImagePipeline.from_pretrained(
'shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_lowercase : List[str] = """A painting of a squirrel eating a burger """
_lowercase : Union[str, Any] = torch.manual_seed(0 )
_lowercase : Union[str, Any] = pipe(
prompt=_UpperCamelCase , generator=_UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
_lowercase : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowercase : Optional[int] = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 714 |
'''simple docstring'''
_A : Dict ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_A : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_A : Dict ={
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 4 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_A : List[str] =logging.get_logger(__name__)
_A : List[str] ={
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class lowerCamelCase__ ( a__ , a__ ):
'''simple docstring'''
A_ = """bit"""
A_ = ["""preactivation""", """bottleneck"""]
A_ = ["""SAME""", """VALID"""]
def __init__( self : str , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : Tuple=64 , UpperCamelCase_ : Optional[Any]=[256, 512, 1024, 2048] , UpperCamelCase_ : List[str]=[3, 4, 6, 3] , UpperCamelCase_ : Tuple="preactivation" , UpperCamelCase_ : List[str]="relu" , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : int=32 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Union[str, Any]=32 , UpperCamelCase_ : Optional[int]=1 , UpperCamelCase_ : Any=None , UpperCamelCase_ : Any=None , **UpperCamelCase_ : List[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**_A )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
_lowercase : Dict = global_padding.upper()
else:
raise ValueError(F'''Padding strategy {global_padding} not supported''' )
_lowercase : Optional[int] = num_channels
_lowercase : str = embedding_size
_lowercase : Optional[int] = hidden_sizes
_lowercase : Optional[int] = depths
_lowercase : Dict = layer_type
_lowercase : str = hidden_act
_lowercase : Tuple = global_padding
_lowercase : Dict = num_groups
_lowercase : Optional[Any] = drop_path_rate
_lowercase : Tuple = embedding_dynamic_padding
_lowercase : Union[str, Any] = output_stride
_lowercase : Dict = width_factor
_lowercase : List[Any] = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(_A ) + 1 )]
_lowercase : Dict = get_aligned_output_features_output_indices(
out_features=_A , out_indices=_A , stage_names=self.stage_names )
| 715 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __UpperCamelCase ( _lowercase ) -> torch.Tensor:
    """Row-wise entropy of ``softmax(_lowercase)`` along dim 1.

    Computes ``H = log(sum_j exp(x_j)) - (sum_j x_j * exp(x_j)) / (sum_j exp(x_j))``,
    the Shannon entropy of the softmax distribution over each row.

    Fix: shift every row by its maximum before exponentiating (the
    log-sum-exp trick). Entropy is invariant under a per-row constant
    shift, so the result is mathematically unchanged, but ``torch.exp``
    no longer overflows to ``inf`` for large logits.

    Args:
        _lowercase: 2-D tensor of logits, shape (batch, num_classes).

    Returns:
        1-D tensor of per-row entropies, shape (batch,).
    """
    # Per-row max, kept as a column so it broadcasts over dim 1.
    row_max = torch.max(_lowercase, dim=1, keepdim=True).values
    shifted = _lowercase - row_max
    exp_x = torch.exp(shifted)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i - m)
    B = torch.sum(shifted * exp_x, dim=1)  # sum of (x_i - m) * exp(x_i - m)
    return torch.log(A) - B / A
class lowerCamelCase__ ( nn.Module ):
    """DeeBERT encoder: a stack of BERT layers where each layer is paired with a
    "highway" off-ramp that classifies from the intermediate hidden state and,
    at inference time, exits early once its prediction entropy drops below a
    per-layer threshold.

    Bug fixes versus the previous revision:
      * ``__init__`` and the forward pass bound every computed value to a
        throwaway local (``_lowercase``) instead of the attributes/accumulators
        the rest of the class reads (``self.layer``, ``self.output_attentions``,
        ``all_hidden_states``, ...), so the module carried no state.
      * The forward method declared several parameters with the same name,
        which is a SyntaxError in Python; the parameters now have distinct names.
      * The threshold setter referenced an undefined name ``x``.
    """

    def __init__( self , UpperCamelCase_ ):
        """Build ``num_hidden_layers`` BERT layers plus one highway head per layer.

        Args:
            UpperCamelCase_: a BERT model config.
        """
        super().__init__()
        self.output_attentions = UpperCamelCase_.output_attentions
        self.output_hidden_states = UpperCamelCase_.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(UpperCamelCase_ ) for _ in range(UpperCamelCase_.num_hidden_layers )] )
        self.highway = nn.ModuleList([BertHighway(UpperCamelCase_ ) for _ in range(UpperCamelCase_.num_hidden_layers )] )
        # -1 disables early exit on every layer until thresholds are set explicitly.
        self.early_exit_entropy = [-1 for _ in range(UpperCamelCase_.num_hidden_layers )]

    def __UpperCAmelCase ( self , UpperCamelCase_ ):
        """Set the early-exit entropy thresholds; a scalar broadcasts to all layers."""
        if (type(UpperCamelCase_ ) is float) or (type(UpperCamelCase_ ) is int):
            # Same scalar threshold for every layer.
            for i in range(len(self.early_exit_entropy ) ):
                self.early_exit_entropy[i] = UpperCamelCase_
        else:
            # Per-layer sequence of thresholds.
            self.early_exit_entropy = UpperCamelCase_

    def __UpperCAmelCase ( self , UpperCamelCase_ ):
        """Copy the main model pooler's weights into every highway pooler."""
        loaded_model = UpperCamelCase_.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )

    def __UpperCAmelCase ( self , hidden_states , attention_mask=None , head_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
        """Run the layer stack, collecting one highway exit per layer.

        At eval time, if a highway's prediction entropy falls below that layer's
        threshold, a ``HighwayException`` carrying the early result and the exit
        layer index (1-based) is raised to unwind out of the stack.

        Returns:
            ``(last_hidden_state, [all_hidden_states], [all_attentions], all_highway_exits)``
            where the bracketed entries are present only when the matching
            ``output_*`` flag is set.
        """
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states , attention_mask , head_mask[i] , encoder_hidden_states , encoder_attention_mask )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            # Snapshot of what the encoder would return if it stopped here.
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs )
            # highway_exit is (logits, pooled_output)
            if not self.training:
                highway_logits = highway_exit[0]
                # NOTE(review): `entropy` must be the module-level softmax-entropy
                # helper; confirm it is exposed under this name at module scope.
                highway_entropy = entropy(highway_logits )
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    # Confident enough: abort the stack with the early result.
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output , i + 1 )
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , A , )
class lowerCamelCase__ ( A ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(UpperCamelCase_ )
_lowercase : int = config
_lowercase : int = BertEmbeddings(UpperCamelCase_ )
_lowercase : List[Any] = DeeBertEncoder(UpperCamelCase_ )
_lowercase : Any = BertPooler(UpperCamelCase_ )
self.init_weights()
    def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
        '''Copy the model's main pooler weights into every highway exit's pooler.'''
        # NOTE(review): assumes the encoder object exposes `init_highway_pooler`;
        # the encoder class defined above only carries mangled method names — confirm.
        self.encoder.init_highway_pooler(self.pooler )
    def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
        '''Return the word-embedding module used as the model's input embeddings.'''
        return self.embeddings.word_embeddings
def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict ) -> Any:
'''simple docstring'''
_lowercase : Optional[Any] = value
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int ) -> Union[str, Any]:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCamelCase_ )
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def __UpperCAmelCase ( self : Tuple , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : Tuple=None , ) -> Union[str, Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowercase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowercase : Any = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowercase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowercase : Tuple = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if encoder_attention_mask is None:
_lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if token_type_ids is None:
_lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowercase : int = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowercase : int = encoder_attention_mask[:, None, None, :]
_lowercase : str = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowercase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -1_00_00.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
_lowercase : Dict = self.embeddings(
input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
_lowercase : List[Any] = self.encoder(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
_lowercase : int = encoder_outputs[0]
_lowercase : str = self.pooler(UpperCamelCase_ )
_lowercase : List[Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowerCamelCase__ ( A ):
    """Control-flow exception raised by a DeeBERT highway exit.

    Carries the partial model outputs (``message``) and the 1-based index of
    the layer that exited (``exit_layer``) up to the caller, which catches the
    exception and uses these values as the final outputs.
    """

    def __init__( self , message , exit_layer ) -> None:
        # Bug fix: the original bound both arguments to one throwaway local
        # (and even reused a single parameter name, a SyntaxError), so the
        # ``e.message`` / ``e.exit_layer`` attributes read by callers were
        # never set.
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class lowerCamelCase__ ( nn.Module ):
    """Highway (early-exit) head: BERT pooler + dropout + linear classifier
    attached to an intermediate encoder layer."""

    def __init__( self , config ) -> None:
        super().__init__()
        # Bug fix: the submodules were bound to throwaway locals and the
        # parameter name did not match the ``config`` reads, so the attributes
        # used in the forward pass were never registered on the module.
        self.pooler = BertPooler(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , config.num_labels )

    def forward( self , encoder_outputs ):
        """Return ``(logits, pooled_output)`` computed from ``encoder_outputs[0]``."""
        # Bug fix: the original read ``pooler_input``/``pooler_output``/
        # ``bmodel_output``/``logits`` without ever binding them; the method is
        # also named ``forward`` so ``nn.Module.__call__`` can dispatch to it.
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input )
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output )
        logits = self.classifier(pooled_output )
        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """ , A , )
class lowerCamelCase__ ( A ):
    """DeeBERT sequence classifier that also computes per-layer highway losses.

    NOTE(review): machine-mangled like the backbone — all locals are a
    throwaway ``_lowercase``, the forward signature repeats one parameter name
    (a SyntaxError), and several reads (``outputs``, ``logits``, ``loss``,
    ``highway_logits``, ``highway_losses``, ``original_entropy``,
    ``exit_layer``, ``labels``, ``train_highway``, ``output_layer``) are never
    bound.  Code left byte-identical; restore from upstream DeeBERT before use.
    """

    def __init__( self : int , UpperCamelCase_ : List[Any] ) -> List[str]:
        '''Build the DeeBERT backbone plus dropout and a linear classifier.'''
        super().__init__(UpperCamelCase_ )
        # NOTE(review): ``config`` is unbound and nothing is stored on ``self``.
        _lowercase : Dict = config.num_labels
        _lowercase : Any = config.num_hidden_layers
        _lowercase : Optional[int] = DeeBertModel(UpperCamelCase_ )
        _lowercase : Any = nn.Dropout(config.hidden_dropout_prob )
        _lowercase : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()

    @add_start_docstrings_to_model_forward(UpperCamelCase_ )
    def __UpperCAmelCase ( self : Dict , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=-1 , UpperCamelCase_ : Union[str, Any]=False , ) -> Tuple:
        '''Forward pass; a HighwayException from the encoder short-circuits to an early exit.'''
        _lowercase : Union[str, Any] = self.num_layers
        try:
            _lowercase : Tuple = self.bert(
                UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            _lowercase : List[Any] = outputs[1]
            _lowercase : int = self.dropout(UpperCamelCase_ )
            _lowercase : Optional[int] = self.classifier(UpperCamelCase_ )
            _lowercase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
        except HighwayException as e:
            # An intermediate layer exited early: its partial outputs become
            # the final outputs and the exit layer index is recorded.
            _lowercase : Union[str, Any] = e.message
            _lowercase : Any = e.exit_layer
            _lowercase : Optional[int] = outputs[0]
        if not self.training:
            _lowercase : Union[str, Any] = entropy(UpperCamelCase_ )
            _lowercase : Tuple = []
            _lowercase : Tuple = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                _lowercase : Tuple = MSELoss()
                _lowercase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                _lowercase : Union[str, Any] = CrossEntropyLoss()
                _lowercase : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            _lowercase : Optional[Any] = []
            for highway_exit in outputs[-1]:
                _lowercase : Optional[Any] = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(UpperCamelCase_ )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    _lowercase : Union[str, Any] = MSELoss()
                    _lowercase : Any = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    _lowercase : Dict = CrossEntropyLoss()
                    _lowercase : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(UpperCamelCase_ )
            if train_highway:
                _lowercase : List[str] = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                _lowercase : Optional[Any] = (loss,) + outputs
        if not self.training:
            _lowercase : List[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                _lowercase : Dict = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                ) # use the highway of the last layer
        return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 4 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__ :
    """Helper that builds tiny RegNet configs/inputs for the TF model tests.

    NOTE(review): machine-mangled — all ``__init__`` parameters share the name
    ``UpperCamelCase_`` (duplicate parameter names are a SyntaxError), the body
    reads unbound names (``parent``, ``batch_size``, ...), and every result is
    bound to a throwaway ``_lowercase`` local instead of a ``self`` attribute.
    Code left byte-identical.
    """

    def __init__( self : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Union[str, Any]=32 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Optional[int]=10 , UpperCamelCase_ : Optional[Any]=[10, 20, 30, 40] , UpperCamelCase_ : Tuple=[1, 1, 2, 1] , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]="relu" , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Union[str, Any]=None , ) -> str:
        '''Store test hyper-parameters (batch size, image size, depths, ...).'''
        # NOTE(review): the mutable list defaults above are shared across calls.
        _lowercase : Optional[Any] = parent
        _lowercase : Optional[int] = batch_size
        _lowercase : List[str] = image_size
        _lowercase : List[str] = num_channels
        _lowercase : Tuple = embeddings_size
        _lowercase : Optional[int] = hidden_sizes
        _lowercase : Dict = depths
        _lowercase : Optional[int] = is_training
        _lowercase : List[str] = use_labels
        _lowercase : List[Any] = hidden_act
        _lowercase : Optional[int] = num_labels
        _lowercase : int = scope
        _lowercase : List[Any] = len(__lowerCamelCase )

    def __UpperCAmelCase ( self : Dict ) -> List[Any]:
        '''Build (config, pixel_values, labels) for a test forward pass.'''
        _lowercase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _lowercase : Optional[Any] = None
        if self.use_labels:
            _lowercase : Any = ids_tensor([self.batch_size] , self.num_labels )
        _lowercase : List[Any] = self.get_config()
        return config, pixel_values, labels

    def __UpperCAmelCase ( self : List[str] ) -> int:
        '''Create a tiny RegNetConfig from the stored hyper-parameters.'''
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )

    def __UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] ) -> Tuple:
        '''Base model: check last_hidden_state shape (B, C, H//32, W//32).'''
        _lowercase : List[str] = TFRegNetModel(config=__lowerCamelCase )
        _lowercase : str = model(__lowerCamelCase , training=__lowerCamelCase )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def __UpperCAmelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple ) -> List[str]:
        '''Classification model: logits shape must be (B, num_labels).'''
        _lowercase : List[Any] = self.num_labels
        _lowercase : int = TFRegNetForImageClassification(__lowerCamelCase )
        _lowercase : str = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __UpperCAmelCase ( self : str ) -> Dict:
        '''Return (config, inputs_dict) for the shared mixin tests.'''
        _lowercase : Any = self.prepare_config_and_inputs()
        _lowercase : str = config_and_inputs
        _lowercase : Any = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class lowerCamelCase__ ( _A , _A , unittest.TestCase ):
    """Common TF model tests specialised for RegNet.

    NOTE(review): machine-mangled — the base list repeats ``_A`` (duplicate
    base classes raise TypeError at class creation), the class attribute
    ``A_`` is rebound seven times so only the last value survives, locals go
    into a throwaway ``_lowercase``, and several names read below
    (``pixel_values``, ``inputs_dict``, ``hidden_states``, ``arg_names``,
    ``layers_type``, ``model``, ``tuple_object``, ``dict_object``) are never
    bound.  Code left byte-identical.
    """
    A_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    A_ = (
        {"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
    A_ = False
    A_ = False
    A_ = False
    A_ = False
    A_ = False

    def __UpperCAmelCase ( self : str ) -> List[str]:
        '''Create the model tester and a config tester (no text modality).'''
        _lowercase : Optional[Any] = TFRegNetModelTester(self )
        _lowercase : int = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )

    def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
        '''Config common-properties check is intentionally a no-op for RegNet.'''
        return

    @unittest.skip(reason='RegNet does not use inputs_embeds' )
    def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        '''Skipped: convolutional backbone, no input embeddings.'''
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
    @slow
    def __UpperCAmelCase ( self : str ) -> Any:
        '''keras fit test; GPU only because of grouped-conv backprop.'''
        super().test_keras_fit()

    @unittest.skip(reason='RegNet does not support input and output embeddings' )
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        '''Skipped: no input/output embeddings on a conv model.'''
        pass

    def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
        '''The model call signature must start with `pixel_values`.'''
        _lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowercase : Union[str, Any] = model_class(__lowerCamelCase )
            _lowercase : int = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowercase : Union[str, Any] = [*signature.parameters.keys()]
            _lowercase : Optional[int] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __lowerCamelCase )

    def __UpperCAmelCase ( self : Tuple ) -> Tuple:
        '''Base model shape check via the tester.'''
        _lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCamelCase )

    def __UpperCAmelCase ( self : str ) -> List[str]:
        '''Hidden-state count and feature-map shape checks for both layer types.'''
        def check_hidden_states_output(UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str ):
            # NOTE(review): all three parameters share one name (SyntaxError).
            _lowercase : int = model_class(__lowerCamelCase )
            _lowercase : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) , training=__lowerCamelCase )
            _lowercase : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            _lowercase : Dict = self.model_tester.num_stages
            self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
        _lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
        _lowercase : str = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                _lowercase : List[str] = layer_type
                _lowercase : List[Any] = True
                check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                _lowercase : Union[str, Any] = True
                check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )

    def __UpperCAmelCase ( self : Any ) -> Dict:
        '''Tuple vs dict output equivalence, with/without hidden states and labels.'''
        _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        def check_equivalence(UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]={} ):
            # NOTE(review): mutable default ``{}`` is shared across calls, and
            # the four parameters share one name (SyntaxError).
            _lowercase : Optional[int] = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase )
            _lowercase : int = model(__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase ).to_tuple()
            def recursive_check(UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ):
                if isinstance(__lowerCamelCase , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase ):
                        recursive_check(__lowerCamelCase , __lowerCamelCase )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(__lowerCamelCase , __lowerCamelCase ) ) , msg=(
                            'Tuple and dict output are not equal. Difference:'
                            F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
                        ) , )
            recursive_check(__lowerCamelCase , __lowerCamelCase )
        for model_class in self.all_model_classes:
            _lowercase : Tuple = model_class(__lowerCamelCase )
            _lowercase : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
            _lowercase : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
            check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
            _lowercase : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
            _lowercase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
            check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
            _lowercase : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
            _lowercase : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
            check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {'output_hidden_states': True} )
            _lowercase : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
            _lowercase : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
            check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {'output_hidden_states': True} )

    def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
        '''Image-classification head check via the tester.'''
        _lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )

    @slow
    def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
        '''Load the first released checkpoint from the hub.'''
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowercase : Union[str, Any] = TFRegNetModel.from_pretrained(__lowerCamelCase )
            self.assertIsNotNone(__lowerCamelCase )
def prepare_img() -> "Image.Image":
    """Load the COCO cats fixture image used by the slow integration test.

    Bug fixes: the function was defined under an obfuscated name although the
    integration test calls it as ``prepare_img()``, and the opened image was
    bound to a throwaway local while an unbound name ``image`` was returned.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
    """Slow integration test against the released TF RegNet checkpoint.

    NOTE(review): machine-mangled — the ``default_image_processor`` property
    lost its name (it is defined as ``__UpperCAmelCase``), and the test body
    reads unbound names (``image_processor``, ``model``, ``outputs``) because
    every result goes into a throwaway ``_lowercase`` local.  Code left
    byte-identical.
    """
    @cached_property
    def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
        '''Image processor for the first released checkpoint (or None without vision).'''
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
        '''Forward the COCO fixture image and check the logits head.'''
        _lowercase : List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        _lowercase : Optional[int] = self.default_image_processor
        _lowercase : List[Any] = prepare_img()
        _lowercase : str = image_processor(images=__lowerCamelCase , return_tensors='tf' )
        # forward pass
        _lowercase : List[Any] = model(**__lowerCamelCase , training=__lowerCamelCase )
        # verify the logits
        _lowercase : Dict = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , __lowerCamelCase )
        _lowercase : Optional[Any] = tf.constant([-0.41_80, -1.50_51, -3.48_36] )
        tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 )
| 716 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowerCamelCase__ ( unittest.TestCase ):
    """Tests for the greedy knapsack solver in ``knapsack.greedy_knapsack``.

    Bug fixes: every test method previously shared one obfuscated name (so
    only the last definition survived and unittest discovered none of them),
    and the assertions referenced an unbound ``UpperCamelCase_``.  Method
    names now follow the ``test_*`` discovery convention and
    ``assertRaisesRegex`` receives the exception class it checks for.
    """

    def test_sorted(self ) -> None:
        """`calc_profit` reaches the fractional-greedy optimum (210 here)."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )

    def test_negative_max_weight(self ) -> None:
        """A negative capacity must be rejected."""
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )

    def test_negative_weight_value(self ) -> None:
        """A negative weight entry must be rejected."""
        self.assertRaisesRegex(ValueError , 'Weight can not be negative.' )

    def test_negative_profit_value(self ) -> None:
        """A negative profit entry must be rejected."""
        self.assertRaisesRegex(ValueError , 'Profit can not be negative.' )

    def test_null_max_weight(self ) -> None:
        """A zero capacity must be rejected."""
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )

    def test_unequal_list_length(self ) -> None:
        """Profit and weight lists of different lengths must be rejected."""
        self.assertRaisesRegex(
            ValueError , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 4 | 0 |
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowerCamelCase__ ( _UpperCAmelCase ):
    """Configuration for M-CLIP (an XLM-R text tower plus a linear projection
    into the CLIP image-embedding space).

    Bug fix: the two size arguments previously shared a single parameter name
    (a SyntaxError) and were stored in a throwaway local; they are now kept on
    ``self`` under the attribute names the model reads
    (``transformerDimensions`` / ``numDims``).
    """

    A_ = """M-CLIP"""

    def __init__( self , transformerDimSize=1024 , imageDimSize=768 , **kwargs ) -> None:
        # Width of the XLM-R hidden states and of the target CLIP space.
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class lowerCamelCase__ ( _UpperCAmelCase ):
    """M-CLIP text encoder: an XLM-R transformer plus a linear projection into
    the CLIP embedding space.

    Bug fixes: the ``*``/``**`` parameters reused one name (a SyntaxError),
    the constructor forwarded unbound names, and both submodules were bound
    to throwaway locals although the forward pass reads ``self.transformer``
    and ``self.LinearTransformation``.
    """

    # NOTE(review): ``MCLIPConfig`` is unbound in this mangled file (the
    # config class above lost its name); left as written.
    A_ = MCLIPConfig

    def __init__( self , config , *args , **kwargs ) -> None:
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )

    def forward( self , input_ids , attention_mask ):
        """Return (projected mean-pooled sentence embedding, pooled embeddings)."""
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        # Attention-mask-weighted mean pooling over the sequence dimension.
        embs = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs ), embs
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Bug fix: the original bound every optional import group (and the final
# _LazyModule) to one repeatedly-overwritten name, then passed an undefined
# ``_import_structure`` to _LazyModule and never installed the lazy module.
# The structure is accumulated in a single dict keyed by submodule, following
# the standard transformers lazy-init layout.
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_xlnet'] = [
        'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLNetForMultipleChoice',
        'XLNetForQuestionAnswering',
        'XLNetForQuestionAnsweringSimple',
        'XLNetForSequenceClassification',
        'XLNetForTokenClassification',
        'XLNetLMHeadModel',
        'XLNetModel',
        'XLNetPreTrainedModel',
        'load_tf_weights_in_xlnet',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_xlnet'] = [
        'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXLNetForMultipleChoice',
        'TFXLNetForQuestionAnsweringSimple',
        'TFXLNetForSequenceClassification',
        'TFXLNetForTokenClassification',
        'TFXLNetLMHeadModel',
        'TFXLNetMainLayer',
        'TFXLNetModel',
        'TFXLNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers the deferred imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 4 | 0 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic sigmoid ``1 / (1 + e^-z)``; works element-wise on arrays.

    Bug fix: the body read an unbound ``z`` while the parameter had a
    throwaway name, and call sites below invoke this as ``sigmoid_function``.
    """
    return 1 / (1 + np.exp(-z ))
def cost_function(h, y):
    """Mean binary cross-entropy between predictions ``h`` and labels ``y``.

    Bug fix: both parameters shared one throwaway name (a SyntaxError) while
    the body read ``h`` and ``y``; the training loop calls ``cost_function``.
    """
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood(x, y, weights):
    """Bernoulli log-likelihood of labels ``y`` given features ``x`` and ``weights``.

    Bug fix: all three parameters shared one throwaway name (a SyntaxError)
    and the body read unbound ``y``/``scores``.
    """
    scores = np.dot(x , weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
def logistic_reg(alpha, x, y, max_iterations=70000):
    """Batch gradient-descent logistic regression; returns the weight vector.

    Bug fix: the parameters and locals were throwaway names while the body
    read the real ones (``x``, ``theta``, ``h`` ...), and the ``__main__``
    block below calls this function as ``logistic_reg``.
    """
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T , h - y ) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        j = cost_function(h , y )
        if iterations % 100 == 0:
            print(f'''loss: {j} \t''' )  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    # Bug fix: the original bound every result to one repeatedly-overwritten
    # name while later lines read the real names (``x``, ``y``, ``alpha``,
    # ``theta``, ``probs`` ...); the canonical names are restored and the demo
    # is kept under the import guard.
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=7_0_0_0_0)
    print('theta: ', theta)  # printing the theta i.e our weights vector

    def predict_prob(features):
        """Probability of class 1 under the trained model."""
        return sigmoid_function(
            np.dot(features, theta) )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(1_0, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    (xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
    (xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
    (xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
    grid = np.c_[xxa.ravel(), xxb.ravel()]
    probs = predict_prob(grid).reshape(xxa.shape)
    # Draw the p = 0.5 decision boundary.
    plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors='black')
    plt.legend()
    plt.show()
| 718 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Bug fix: both values were bound to the same name, discarding the logger.
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
    'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
# Backward-compatible alias for the name previously holding the final value.
_A = MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP
class lowerCamelCase__ ( A ):
    """Configuration for MarkupLM models: BERT-style text hyper-parameters plus
    XPath-embedding parameters describing the markup (HTML/XML) structure.

    Bug fix: the original ``__init__`` repeated one parameter name for every
    argument (a SyntaxError) and assigned unbound names to throwaway locals;
    the real parameter names are restored and stored on ``self``.
    """

    A_ = """markuplm"""

    def __init__(
        self ,
        vocab_size=3_0522 ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=2 ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-12 ,
        pad_token_id=0 ,
        bos_token_id=0 ,
        eos_token_id=2 ,
        max_xpath_tag_unit_embeddings=256 ,
        max_xpath_subs_unit_embeddings=1024 ,
        tag_pad_id=216 ,
        subs_pad_id=1001 ,
        xpath_unit_hidden_size=32 ,
        max_depth=50 ,
        position_embedding_type="absolute" ,
        use_cache=True ,
        classifier_dropout=None ,
        **kwargs ,
    ) -> None:
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties (markup/XPath structure)
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 4 | 0 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def __UpperCamelCase ( args ) -> "DownloadCommand":
    """argparse factory: build the download command from the parsed CLI args.

    Bug fix: the parameter had a throwaway name while the body read an
    unbound ``args``.
    """
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code )
class lowerCamelCase__ ( BaseTransformersCLICommand ):
    """CLI command: pre-download a model and its tokenizer into the cache.

    Bug fixes: the base class was an unbound ``_a`` (the only imported base is
    ``BaseTransformersCLICommand``); the two command methods shared one
    obfuscated name so the later definition shadowed the earlier; and the
    constructor stored its arguments in throwaway locals instead of the
    ``self._*`` attributes read by ``run``.
    """

    @staticmethod
    def register_subcommand( parser ) -> None:
        """Register the ``download`` sub-command on *parser*."""
        download_parser = parser.add_parser('download' )
        download_parser.add_argument(
            '--cache-dir' , type=str , default=None , help='Path to location to store the models' )
        download_parser.add_argument(
            '--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
        download_parser.add_argument(
            '--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
        download_parser.add_argument('model' , type=str , help='Name of the model to download' )
        # NOTE(review): the intended default func is the module-level factory
        # (obfuscated as ``__UpperCamelCase`` above); that name would be
        # name-mangled if referenced here, so the original unbound name is
        # kept and flagged for a manual fix.
        download_parser.set_defaults(func=snake_case_ )

    def __init__( self , model , cache , force , trust_remote_code ) -> None:
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run( self ) -> None:
        """Download model weights and tokenizer with the stored options."""
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 719 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __UpperCamelCase ( checkpoint_url ) -> "SwinaSRConfig":
    """Build a Swin2SR config whose fields match the checkpoint named in
    ``checkpoint_url``.

    Bug fix: the original read an unbound ``checkpoint_url`` (its parameter
    had a throwaway name) and assigned every field to a local instead of the
    config object, so an unmodified default config was returned.
    """
    config = SwinaSRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        # JPEG artifact reduction: single-channel, no upscaling, 0-255 range.
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 2_5_5.0
        config.upsampler = ''
    return config
def rename_key(name: str, config) -> str:
    """Map an original Swin2SR state-dict key onto the HF model's naming scheme.

    Args:
        name: Key from the original checkpoint's state dict.
        config: The model config; only ``config.upsampler`` is consulted, to pick
            the right head-renaming branch.

    Returns:
        The renamed key. Non-head keys are prefixed with ``swin2sr.``.
    """
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            # nest everything under the `upsample` submodule
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        # backbone weights live under the `swin2sr` submodule of the HF model
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite an original Swin2SR state dict in place to match the HF layout.

    Fused ``qkv`` projections are split into separate query/key/value tensors;
    every other key is renamed via :func:`rename_key`.

    NOTE(review): the target key template was reconstructed from the upstream
    conversion script — confirm against the HF Swin2SR module names.

    Args:
        orig_state_dict: State dict loaded from the original checkpoint.
        config: Model config; ``config.embed_dim`` gives the per-head split size.

    Returns:
        The converted state dict (the same, mutated, mapping).
    """
    # Iterate over a copy of the keys because entries are popped/re-inserted.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # e.g. "layers.<stage>.residual_group.blocks.<block>.attn.qkv.weight"
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                # Fused qkv weight is stacked as [query; key; value] along dim 0.
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Swin2SR checkpoint to the HF format, verify, and save.

    The checkpoint is downloaded from ``checkpoint_url``, its state dict is
    renamed via :func:`convert_state_dict`, and the resulting model output is
    checked against hard-coded reference slices for a known test image.

    Args:
        checkpoint_url: URL of the original ``.pth`` checkpoint to convert.
        pytorch_dump_folder_path: Optional local directory where the converted
            model and image processor are saved.
        push_to_hub: If True, push model and processor to the Hub under
            ``caidas/<model_name>``.

    Raises:
        ValueError: If keys are missing after loading, or unexpected keys other
            than relative-position/mask buffers are found.
    """
    config = get_config(checkpoint_url)
    model = SwinaSRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = convert_state_dict(state_dict, config)
    # strict=False: position-index / mask buffers are recomputed, not loaded.
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    # JPEG artifact-removal checkpoints operate on a single channel.
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
    # The obfuscated original bound the parser/args to `_A` while the calls
    # below used `parser`/`args`, which would raise NameError at runtime.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
        type=str,
        help="URL of the original Swin2SR checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")

    args = parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
# Module logger — code below (e.g. the truncation warning in the prompt
# encoder) references the name `logger`, so it must be bound here.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class lowerCamelCase__ ( ModelMixin , ConfigMixin ):
    """Holds the (optionally learnable) classifier-free guidance embeddings.

    When ``learnable`` is True, a zero-initialized ``(length, hidden_size)``
    parameter is created; otherwise the parameter wraps ``None``.

    NOTE(review): the base classes were mangled to an undefined name in the
    original; ``ModelMixin, ConfigMixin`` is reconstructed from this module's
    imports and the use of ``@register_to_config`` — confirm upstream.
    """

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        # The pipeline below reads `self.learnable` and `self.embeddings`.
        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    '''
    VQ-Diffusion text-to-image pipeline: a conditional transformer denoises
    discrete VQ-VAE latent indices step by step under a VQDiffusionScheduler,
    conditioned on CLIP text embeddings; the VQ-VAE then decodes the final
    indices into an image.

    NOTE(review): this class is machine-mangled — the base-class name is
    undefined, the `A_ = 42` stubs below presumably replaced module type
    annotations, duplicate `UpperCamelCase_` parameter names make the
    signatures invalid, and many assignment targets were collapsed to
    `_lowercase` (later lines read names such as `text_inputs`,
    `prompt_embeds`, `sample`, `keep_mask` that are never bound). Restore
    from the upstream source before use.
    '''
    # presumably once: vqvae, text_encoder, tokenizer, transformer,
    # learned_classifier_free_sampling_embeddings, scheduler type annotations
    A_ = 42
    A_ = 42
    A_ = 42
    A_ = 42
    A_ = 42
    A_ = 42
    def __init__( self : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , ) -> Optional[Any]:
        '''Register the sub-modules (vqvae, transformer, text encoder, tokenizer,
        scheduler, learned classifier-free-sampling embeddings) on the pipeline.'''
        super().__init__()
        self.register_modules(
            vqvae=_lowercase , transformer=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
    def __UpperCAmelCase ( self : str , UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : str ) -> int:
        '''Encode the prompt into CLIP text embeddings, duplicated per generated
        image, and (when classifier-free guidance is enabled) concatenated with
        unconditional/negative embeddings into a single batch.'''
        _lowercase : Union[str, Any] = len(_lowercase ) if isinstance(_lowercase , _lowercase ) else 1
        # get prompt text embeddings
        _lowercase : Dict = self.tokenizer(
            _lowercase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        _lowercase : List[Any] = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            # warn about and drop tokens beyond CLIP's maximum sequence length
            _lowercase : Any = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            _lowercase : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
        _lowercase : Any = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        _lowercase : Union[str, Any] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_lowercase )
        # duplicate text embeddings for each generation per prompt
        _lowercase : Tuple = prompt_embeds.repeat_interleave(_lowercase , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                # use the learned unconditional embeddings
                _lowercase : Optional[Any] = self.learned_classifier_free_sampling_embeddings.embeddings
                _lowercase : Any = negative_prompt_embeds.unsqueeze(0 ).repeat(_lowercase , 1 , 1 )
            else:
                # encode the empty prompt as the unconditional embedding
                _lowercase : Union[str, Any] = [""""""] * batch_size
                _lowercase : Tuple = text_input_ids.shape[-1]
                _lowercase : Dict = self.tokenizer(
                    _lowercase , padding='max_length' , max_length=_lowercase , truncation=_lowercase , return_tensors='pt' , )
                _lowercase : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                _lowercase : Optional[int] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_lowercase )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            _lowercase : List[Any] = negative_prompt_embeds.shape[1]
            _lowercase : int = negative_prompt_embeds.repeat(1 , _lowercase , 1 )
            _lowercase : int = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _lowercase , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            _lowercase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds
    @torch.no_grad()
    def __call__( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Any = 100 , UpperCamelCase_ : Tuple = 5.0 , UpperCamelCase_ : int = 1.0 , UpperCamelCase_ : Any = 1 , UpperCamelCase_ : List[Any] = None , UpperCamelCase_ : Dict = None , UpperCamelCase_ : int = "pil" , UpperCamelCase_ : Union[str, Any] = True , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : List[Any] = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        '''Run the full generation loop: encode the prompt, iteratively denoise
        the fully-masked latent indices with the transformer + scheduler, then
        decode the final indices through the VQ-VAE into PIL images (or a raw
        array when ``output_type`` is not ``"pil"``).'''
        if isinstance(_lowercase , _lowercase ):
            _lowercase : Optional[Any] = 1
        elif isinstance(_lowercase , _lowercase ):
            _lowercase : List[str] = len(_lowercase )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(_lowercase )}''' )
        _lowercase : List[str] = batch_size * num_images_per_prompt
        # guidance_scale > 1 enables classifier-free guidance
        _lowercase : List[str] = guidance_scale > 1.0
        _lowercase : Tuple = self._encode_prompt(_lowercase , _lowercase , _lowercase )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(_lowercase , _lowercase ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(_lowercase )}.''' )
        # get the initial completely masked latents unless the user supplied it
        _lowercase : Optional[Any] = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # the last vector embedding index is the "masked" token
            _lowercase : Optional[Any] = self.transformer.num_vector_embeds - 1
            _lowercase : List[str] = torch.full(_lowercase , _lowercase ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'
                    F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
            _lowercase : Optional[int] = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(_lowercase , device=self.device )
        _lowercase : Optional[Any] = self.scheduler.timesteps.to(self.device )
        _lowercase : int = latents
        for i, t in enumerate(self.progress_bar(_lowercase ) ):
            # expand the sample if we are doing classifier free guidance
            _lowercase : Dict = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            _lowercase : Dict = self.transformer(_lowercase , encoder_hidden_states=_lowercase , timestep=_lowercase ).sample
            if do_classifier_free_guidance:
                # combine unconditional and conditional predictions, then renormalize
                _lowercase : Any = model_output.chunk(2 )
                _lowercase : Any = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(_lowercase , dim=1 , keepdim=_lowercase )
            # keep only the highest-probability classes up to truncation_rate
            _lowercase : str = self.truncate(_lowercase , _lowercase )
            # remove `log(0)`'s (`-inf`s)
            _lowercase : Union[str, Any] = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            _lowercase : List[str] = self.scheduler.step(_lowercase , timestep=_lowercase , sample=_lowercase , generator=_lowercase ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(_lowercase , _lowercase , _lowercase )
        # decode the final latent indices through the VQ-VAE codebook
        _lowercase : str = self.vqvae.config.vq_embed_dim
        _lowercase : Optional[Any] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        _lowercase : Optional[int] = self.vqvae.quantize.get_codebook_entry(_lowercase , shape=_lowercase )
        _lowercase : int = self.vqvae.decode(_lowercase , force_not_quantize=_lowercase ).sample
        # map from [-1, 1] to [0, 1] and move channels last for numpy/PIL
        _lowercase : int = (image / 2 + 0.5).clamp(0 , 1 )
        _lowercase : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            _lowercase : Any = self.numpy_to_pil(_lowercase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_lowercase )
    def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Dict ) -> torch.FloatTensor:
        '''Truncate the per-pixel log-probabilities: keep only the most likely
        classes whose cumulative probability stays below ``truncation_rate``
        (always keeping at least the single most likely class) and set the
        rest to log(0).'''
        _lowercase : List[Any] = torch.sort(_lowercase , 1 , descending=_lowercase )
        _lowercase : Optional[Any] = torch.exp(_lowercase )
        _lowercase : Optional[Any] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        _lowercase : Optional[Any] = torch.full_like(keep_mask[:, 0:1, :] , _lowercase )
        _lowercase : Tuple = torch.cat((all_true, keep_mask) , dim=1 )
        _lowercase : int = keep_mask[:, :-1, :]
        # scatter the mask back into the original (unsorted) class order
        _lowercase : List[Any] = keep_mask.gather(1 , indices.argsort(1 ) )
        _lowercase : int = log_p_x_0.clone()
        _lowercase : Any = -torch.inf # -inf = log(0)
        return rv
| 720 |
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    """Split ``word`` into fully justified lines of exactly ``max_width`` chars.

    Extra spaces between words are distributed round-robin starting from the
    leftmost gap; the final line is left-justified and padded on the right.
    Assumes every individual word fits within ``max_width``.

    Args:
        word: The text to justify (whitespace-separated words).
        max_width: The exact width of every output line.

    Returns:
        A list of justified lines, each exactly ``max_width`` characters long.

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']

    >>> text_justification("Two roads diverged in a yellow wood", 16)
    ['Two        roads', 'diverged   in  a', 'yellow wood     ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        # Pad `line` (whose words total `width` chars) out to `max_width`.
        overall_spaces_count = max_width - width
        words_count = len(line)
        if words_count == 1:
            # single word: all padding goes after it
            return line[0] + " " * overall_spaces_count
        spaces_to_insert_between_words = words_count - 1
        # base number of spaces after each word except the last
        num_spaces_between_words_list = spaces_to_insert_between_words * [
            overall_spaces_count // spaces_to_insert_between_words
        ]
        spaces_count_in_locations = overall_spaces_count % spaces_to_insert_between_words
        # distribute the leftover spaces round-robin to the leftmost gaps
        for i in range(spaces_count_in_locations):
            num_spaces_between_words_list[i] += 1
        aligned_words_list = []
        for i in range(spaces_to_insert_between_words):
            aligned_words_list.append(line[i])
            aligned_words_list.append(num_spaces_between_words_list[i] * " ")
        aligned_words_list.append(line[-1])
        return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for current_word in words:
        # width  = total characters of words already on the line
        # len(line) = minimum one space between each pair of existing words
        if width + len(current_word) + len(line) <= max_width:
            line.append(current_word)
            width += len(current_word)
        else:
            # the word does not fit: justify the current line and start a new one
            answer.append(justify(line, width, max_width))
            line, width = [current_word], len(current_word)
    # last line is left-justified: single spaces between words, padded on the right
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
    # Run the doctest examples embedded in this module.
    import doctest

    doctest.testmod()
| 4 | 0 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.