Dataset schema (one row per example):

    code                     string  (81 – 54k chars)
    code_codestyle           int64   (0 – 721)
    style_context            string  (91 – 41.9k chars)
    style_context_codestyle  int64   (0 – 699)
    label                    int64   (0 – 1)
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling for variance-expanding models (Karras et al., 2022)."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
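A minimal driver sketch for the scheduler above, mirroring the predictor/corrector structure of diffusers' KarrasVePipeline; the `denoise` stub is an assumption standing in for a trained model:

import torch

def denoise(x: torch.Tensor, sigma) -> torch.Tensor:
    # placeholder for a real score/denoiser network (assumption)
    return torch.zeros_like(x)

scheduler = KarrasVeScheduler()
scheduler.set_timesteps(50)

sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    sigma = scheduler.schedule[t]
    sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0

    # increase noise, take an Euler step, then a Heun-style correction step
    sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
    out = scheduler.step(denoise(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)
    if sigma_prev != 0:
        out = scheduler.step_correct(
            denoise(out.prev_sample, sigma_prev), sigma_hat, sigma_prev,
            sample_hat, out.prev_sample, out.derivative,
        )
    sample = out.prev_sample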
code_codestyle: 5
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup  # the package is "bs4", not "bsa"
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    # the canonical request header is "User-Agent" (the original used "UserAgent")
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
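Hypothetical invocation of the script above (the file name is an assumption; pass the query as positional arguments):

# python crawl_google_results.py how to sort a list in python
#
# Every positional argument joins the query string; the first five result
# links are then opened in the default browser via webbrowser.open().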
style_context_codestyle: 5
label: 1
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
    "small", "small-base", "medium", "medium-base", "intermediate", "intermediate-base",
    "large", "large-base", "xlarge", "xlarge-base",
]

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        f"funnel-transformer/{name}": f"https://huggingface.co/funnel-transformer/{name}/resolve/main/vocab.txt"
        for name in _model_names
    },
    "tokenizer_file": {
        f"funnel-transformer/{name}": f"https://huggingface.co/funnel-transformer/{name}/resolve/main/tokenizer.json"
        for name in _model_names
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}


class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
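A short sketch of what the overridden methods produce; Funnel marks the leading <cls> with token type id 2 (cls_token_type_id) instead of 0, which is what the overrides above implement (loading from the hub is an assumption):

from transformers import FunnelTokenizerFast

tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
encoded = tokenizer("Hello world", "How are you?")
print(encoded["input_ids"])
print(encoded["token_type_ids"])  # starts with 2 for <cls>, then 0s for segment A, 1s for segment B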
code_codestyle: 5
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
style_context_codestyle: 5
label: 1
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
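A hedged end-to-end sketch of the pipeline above, following the usage documented for the DiT release (checkpoint id and scheduler swap are from that documentation; adjust device and labels to taste):

import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

# map human-readable ImageNet class names to label ids, then sample
class_ids = pipe.get_label_ids(["white shark", "umbrella"])
generator = torch.manual_seed(33)
image = pipe(class_labels=class_ids, num_inference_steps=25, generator=generator).images[0]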
code_codestyle: 5
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
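A minimal sketch of driving the ONNX config above to produce dummy export inputs (the checkpoint name is from the archive map; the rest is standard transformers API, and PyTorch is assumed installed):

from transformers import AutoTokenizer
from transformers.utils import TensorType

config = BlenderbotSmallConfig()
onnx_config = BlenderbotSmallOnnxConfig(config, task="default")
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")

dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
print(list(onnx_config.inputs))  # input_ids, attention_mask, decoder_input_ids, decoder_attention_mask
print(list(dummy))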
style_context_codestyle: 5
label: 1
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change over [a, b] guarantees a root inside
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
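Since equation(x) = 10 - x**2, both brackets above converge to the positive root sqrt(10) ≈ 3.1623. A quick sanity check (hypothetical driver, not part of the original script):

root = bisection(0, 6)
assert abs(equation(root)) < 0.1  # |10 - root**2| is small near sqrt(10)
print(f"{root:.4f}")  # ≈ 3.16, accurate to the 0.01 bracket width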
code_codestyle: 5
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    f"facebook/{name}": f"https://huggingface.co/facebook/{name}/resolve/main/config.json"
    for name in (
        "xmod-base", "xmod-large-prenorm", "xmod-base-13-125k", "xmod-base-30-125k", "xmod-base-30-195k",
        "xmod-base-60-125k", "xmod-base-60-265k", "xmod-base-75-125k", "xmod-base-75-269k",
    )
}


class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
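A small illustrative sketch of the adapter-related fields above (the language codes here are assumed examples; X-MOD keeps one adapter per language and routes to `default_language` when none is specified):

config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
print(config.languages)                 # ['en_XX', 'de_DE']
print(config.adapter_reduction_factor)  # 2: adapter bottleneck is hidden_size // 2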
style_context_codestyle: 5
label: 1
import argparse
import json
from collections import OrderedDict

import torch
from huggingface_hub import cached_download, hf_hub_url

from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def embeddings(idx):
    """Rename keys of the patch-embedding layers of stage `idx`."""
    embed = []
    for hf_suffix, orig_suffix in [
        ("projection.weight", "proj.weight"),
        ("projection.bias", "proj.bias"),
        ("normalization.weight", "norm.weight"),
        ("normalization.bias", "norm.bias"),
    ]:
        embed.append(
            (
                f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.{hf_suffix}",
                f"stage{idx}.patch_embed.{orig_suffix}",
            )
        )
    return embed


def attention(idx, cnt):
    """Rename keys of attention block `cnt` of stage `idx`."""
    attention_weights = []
    hf_prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_prefix = f"stage{idx}.blocks.{cnt}"

    # convolutional projections for query/key/value (conv + batch norm)
    bn_suffixes = ["weight", "bias", "running_mean", "running_var", "num_batches_tracked"]
    for proj, short in [("query", "q"), ("key", "k"), ("value", "v")]:
        attention_weights.append(
            (
                f"{hf_prefix}.attention.attention.convolution_projection_{proj}.convolution_projection.convolution.weight",
                f"{orig_prefix}.attn.conv_proj_{short}.conv.weight",
            )
        )
        for suffix in bn_suffixes:
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.convolution_projection_{proj}.convolution_projection.normalization.{suffix}",
                    f"{orig_prefix}.attn.conv_proj_{short}.bn.{suffix}",
                )
            )

    # linear projections for query/key/value
    for proj, short in [("query", "q"), ("key", "k"), ("value", "v")]:
        for suffix in ["weight", "bias"]:
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.projection_{proj}.{suffix}",
                    f"{orig_prefix}.attn.proj_{short}.{suffix}",
                )
            )

    # attention output, MLP and layer norms
    for hf_suffix, orig_suffix in [
        ("attention.output.dense.weight", "attn.proj.weight"),
        ("attention.output.dense.bias", "attn.proj.bias"),
        ("intermediate.dense.weight", "mlp.fc1.weight"),
        ("intermediate.dense.bias", "mlp.fc1.bias"),
        ("output.dense.weight", "mlp.fc2.weight"),
        ("output.dense.bias", "mlp.fc2.bias"),
        ("layernorm_before.weight", "norm1.weight"),
        ("layernorm_before.bias", "norm1.bias"),
        ("layernorm_after.weight", "norm2.weight"),
        ("layernorm_after.bias", "norm2.bias"),
    ]:
        attention_weights.append((f"{hf_prefix}.{hf_suffix}", f"{orig_prefix}.{orig_suffix}"))

    return attention_weights


def cls_token(idx):
    return [(f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token")]


def final():
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()

    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
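A hypothetical command line for the entry point above (the script file name and output path are assumptions; the checkpoint file name follows the zoo's naming scheme):

# python convert_cvt_checkpoint.py \
#     --cvt_model cvt-w24 \
#     --image_size 384 \
#     --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#     --pytorch_dump_folder_path ./cvt-w24-384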
code_codestyle: 5
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
style_context_codestyle: 5
label: 1
import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
5
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : Dict = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class _lowercase ( lowercase__): """simple docstring""" A__ = "ibert" def __init__( self : int , __lowerCamelCase : List[str]=30522 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : str=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : Any=1E-1_2 , __lowerCamelCase : int=1 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : int=2 , __lowerCamelCase : int="absolute" , __lowerCamelCase : Tuple=False , __lowerCamelCase : Dict="none" , **__lowerCamelCase : Tuple , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Any = vocab_size lowerCamelCase__ : Optional[Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : int = num_attention_heads lowerCamelCase__ : List[str] = hidden_act lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : Any = type_vocab_size lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : Tuple = layer_norm_eps lowerCamelCase__ : int = position_embedding_type lowerCamelCase__ : List[str] = quant_mode lowerCamelCase__ : int = force_dequant class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : Any = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
5
1
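A minimal usage sketch for the quantization-aware config above (it is exported from transformers as IBertConfig; the class names in this sample are mangled). The model is randomly initialized here, for illustration only:

# Assumes a working transformers + torch install.
from transformers import IBertConfig, IBertModel

config = IBertConfig(quant_mode=True, force_dequant="none")  # integer-only inference mode
model = IBertModel(config)  # random weights, not a pretrained checkpoint
print(config.quant_mode, model.config.hidden_size)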
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Queue with three priority buckets; lower index means higher priority."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Queue where the smallest element is dequeued first."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue():
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue():
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
5
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Dict = logging.get_logger(__name__) A : Union[str, Any] = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "roberta" def __init__( self : int , __lowerCamelCase : Dict=50265 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : int=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : Optional[int]=1E-1_2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : int=0 , __lowerCamelCase : Any=2 , __lowerCamelCase : Tuple="absolute" , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=None , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : List[Any] = vocab_size lowerCamelCase__ : str = hidden_size lowerCamelCase__ : int = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : Tuple = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : int = type_vocab_size lowerCamelCase__ : Any = initializer_range lowerCamelCase__ : Dict = layer_norm_eps lowerCamelCase__ : int = position_embedding_type lowerCamelCase__ : Any = use_cache lowerCamelCase__ : int = classifier_dropout class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : int = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : Optional[Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
5
1
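The second class in the RoBERTa file above is the ONNX export config. A sketch of what its inputs property evaluates to for the default task, assuming the usual transformers module layout:

from transformers import RobertaConfig
from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig

onnx_config = RobertaOnnxConfig(RobertaConfig())
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])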
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def lowercase_ ( _A : Dict=None ): """simple docstring""" if subparsers is not None: lowerCamelCase__ : List[Any] = subparsers.add_parser("env" ) else: lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser("Accelerate env command" ) parser.add_argument( "--config_file" , default=_A , help="The config file to use for the default values in the launching script." ) if subparsers is not None: parser.set_defaults(func=_A ) return parser def lowercase_ ( _A : Tuple ): """simple docstring""" lowerCamelCase__ : int = torch.__version__ lowerCamelCase__ : Optional[int] = torch.cuda.is_available() lowerCamelCase__ : Any = is_xpu_available() lowerCamelCase__ : Dict = is_npu_available() lowerCamelCase__ : str = "Not found" # Get the default from the config file. if args.config_file is not None or os.path.isfile(_A ): lowerCamelCase__ : Any = load_config_from_file(args.config_file ).to_dict() lowerCamelCase__ : List[str] = { "`Accelerate` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Numpy version": np.__version__, "PyTorch version (GPU?)": F"{pt_version} ({pt_cuda_available})", "PyTorch XPU available": str(_A ), "PyTorch NPU available": str(_A ), "System RAM": F"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB", } if pt_cuda_available: lowerCamelCase__ : Dict = torch.cuda.get_device_name() print("\nCopy-and-paste the text below in your GitHub issue\n" ) print("\n".join([F"- {prop}: {val}" for prop, val in info.items()] ) ) print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" ) lowerCamelCase__ : Optional[int] = ( "\n".join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()] ) if isinstance(_A , _A ) else F"\t{accelerate_config}" ) print(_A ) lowerCamelCase__ : Tuple = accelerate_config return info def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : Optional[int] = env_command_parser() lowerCamelCase__ : List[Any] = parser.parse_args() env_command(_A ) return 0 if __name__ == "__main__": raise SystemExit(main())
5
import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features A : Union[str, Any] = logging.get_logger(__name__) A : Union[str, Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) A : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _lowercase : """simple docstring""" A__ = field( default=lowercase__ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase__)}) A__ = field( default=lowercase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}) A__ = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ = field( default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) A__ = field( default=64 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) A__ = field( default=30 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) } , ) A__ = field( default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"}) A__ = field( default=lowercase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}) A__ = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"}) class _lowercase ( lowercase__): """simple docstring""" A__ = "train" A__ = "dev" class _lowercase ( lowercase__): """simple docstring""" A__ = 42 A__ = 42 A__ = 42 A__ = 42 def __init__( self : Optional[int] , __lowerCamelCase : SquadDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "pt" , ): '''simple docstring''' lowerCamelCase__ : List[str] = args lowerCamelCase__ : Tuple = is_language_sensitive lowerCamelCase__ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__lowerCamelCase , __lowerCamelCase ): try: lowerCamelCase__ : List[str] = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) lowerCamelCase__ : str = mode # Load data features from cache or dataset file lowerCamelCase__ : Any = "v2" if args.version_2_with_negative else "v1" lowerCamelCase__ : List[str] = os.path.join( cache_dir if 
cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCamelCase__ : List[str] = cached_features_file + ".lock" with FileLock(__lowerCamelCase ): if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache: lowerCamelCase__ : str = time.time() lowerCamelCase__ : Tuple = torch.load(__lowerCamelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowerCamelCase__ : Optional[Any] = self.old_features["features"] lowerCamelCase__ : Optional[int] = self.old_features.get("dataset" , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = self.old_features.get("examples" , __lowerCamelCase ) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in" " future run" ) else: if mode == Split.dev: lowerCamelCase__ : List[Any] = self.processor.get_dev_examples(args.data_dir ) else: lowerCamelCase__ : str = self.processor.get_train_examples(args.data_dir ) lowerCamelCase__ , lowerCamelCase__ : Tuple = squad_convert_examples_to_features( examples=self.examples , tokenizer=__lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCamelCase , ) lowerCamelCase__ : int = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__( self : List[Any] ): '''simple docstring''' return len(self.features ) def __getitem__( self : List[str] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = self.features[i] lowerCamelCase__ : Tuple = torch.tensor(feature.input_ids , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long ) lowerCamelCase__ : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.cls_index , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.p_mask , dtype=torch.float ) lowerCamelCase__ : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float ) lowerCamelCase__ : List[str] = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask} ) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible} ) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowerCamelCase__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"start_positions": start_positions, "end_positions": end_positions} ) return inputs
5
1
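A hedged sketch of driving the dataset class above, which corresponds to the legacy SquadDataset / SquadDataTrainingArguments utilities in transformers; the data_dir path is a placeholder and must contain the SQuAD json files, and the first run performs the (slow) feature conversion before caching:

from transformers import AutoTokenizer, SquadDataset, SquadDataTrainingArguments

args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad_data")  # placeholder path
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
dataset = SquadDataset(args, tokenizer=tokenizer, mode="dev")  # features cached via FileLock
print(len(dataset), dataset[0].keys())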
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Lists of different sizes make sharding ambiguous, raise an error in this case.
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the ranges of shard indices per job, distributed as evenly as possible."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into at most max_num_jobs dicts, sharding only the list values."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Concatenate the list values across a list of gen_kwargs dicts."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle the data-source lists in gen_kwargs, keeping lists of equal length aligned."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
5
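A quick illustration of the helpers above (names restored from the sharding utilities in datasets/utils/sharding.py): shards are split across jobs as evenly as possible, and only list-valued gen_kwargs entries are sharded, everything else is copied as-is:

print(_distribute_shards(num_shards=10, max_num_jobs=3))
# [range(0, 4), range(4, 7), range(7, 10)]

gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt"], "sep": ","}
print(_split_gen_kwargs(gen_kwargs, max_num_jobs=2))
# [{'files': ['a.txt', 'b.txt'], 'sep': ','}, {'files': ['c.txt'], 'sep': ','}]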
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging A : Tuple = logging.get_logger(__name__) A : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all LED models at https://huggingface.co/models?filter=LED A : int = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } A : Union[str, Any] = { "allenai/led-base-16384": 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : str = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) lowerCamelCase__ : Any = bs[:] lowerCamelCase__ : Union[str, Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(_A ) cs.append(2**8 + n ) n += 1 lowerCamelCase__ : Any = [chr(_A ) for n in cs] return dict(zip(_A , _A ) ) def lowercase_ ( _A : Any ): """simple docstring""" lowerCamelCase__ : Union[str, Any] = set() lowerCamelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase__ : Any = char return pairs class _lowercase ( lowercase__): """simple docstring""" A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ["input_ids", "attention_mask"] def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token lowerCamelCase__ : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token lowerCamelCase__ : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token lowerCamelCase__ : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token lowerCamelCase__ : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: lowerCamelCase__ : Union[str, Any] = json.load(__lowerCamelCase ) lowerCamelCase__ : List[str] = {v: k for k, v in self.encoder.items()} lowerCamelCase__ : Union[str, Any] = errors # how to handle errors in decoding lowerCamelCase__ : List[Any] = bytes_to_unicode() lowerCamelCase__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: lowerCamelCase__ : List[Any] = merges_handle.read().split("\n" )[1:-1] lowerCamelCase__ : str = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase__ : Optional[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) lowerCamelCase__ : List[Any] = {} lowerCamelCase__ : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase__ : List[str] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return len(self.encoder ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Dict ): '''simple docstring''' if token in self.cache: return self.cache[token] lowerCamelCase__ : Union[str, Any] = tuple(__lowerCamelCase ) lowerCamelCase__ : Tuple = get_pairs(__lowerCamelCase ) if not pairs: return token while True: lowerCamelCase__ : str = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = bigram lowerCamelCase__ : int = [] lowerCamelCase__ : int = 0 while i < len(__lowerCamelCase ): try: lowerCamelCase__ : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase__ : List[str] = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase__ : Dict = tuple(__lowerCamelCase ) lowerCamelCase__ : str = new_word if len(__lowerCamelCase ) == 1: break else: lowerCamelCase__ : List[str] = get_pairs(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = " ".join(__lowerCamelCase ) lowerCamelCase__ : Dict = word return word def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = [] for token in re.findall(self.pat , __lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in 
self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def lowerCAmelCase ( self : Any , __lowerCamelCase : int ): '''simple docstring''' return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' return self.decoder.get(__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = "".join(__lowerCamelCase ) lowerCamelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCamelCase__ : List[Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase__ : Union[str, Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) lowerCamelCase__ : Tuple = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" 
) lowerCamelCase__ : List[Any] = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ : List[str] = [self.cls_token_id] lowerCamelCase__ : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowerCamelCase__ : Any = [self.sep_token_id] lowerCamelCase__ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict=False , **__lowerCamelCase : List[str] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): lowerCamelCase__ : Dict = " " + text return (text, kwargs) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ): '''simple docstring''' lowerCamelCase__ : str = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: lowerCamelCase__ : str = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCamelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCamelCase__ : Union[str, Any] = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: lowerCamelCase__ : Dict = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCamelCase__ : Optional[int] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowerCamelCase__ : Union[str, Any] = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
5
1
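Usage sketch for the tokenizer above (LEDTokenizer in transformers): the overridden _pad keeps global_attention_mask aligned during batching, filling padded positions with -1 because 0 already means "local attention":

from transformers import LEDTokenizer

tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = tokenizer(["short text", "a slightly longer input text"])  # no padding yet
# Mark the first token of each sequence for global attention.
enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
batch = tokenizer.pad(enc, padding="longest")  # global_attention_mask is padded with -1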
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL A : List[Any] = logging.get_logger(__name__) def lowercase_ ( _A : np.ndarray , _A : Union[int, Iterable[int]] , _A : bool , _A : int ): """simple docstring""" def constraint_to_multiple_of(_A : Optional[Any] , _A : Optional[int] , _A : Any=0 , _A : int=None ): lowerCamelCase__ : Tuple = round(val / multiple ) * multiple if max_val is not None and x > max_val: lowerCamelCase__ : str = math.floor(val / multiple ) * multiple if x < min_val: lowerCamelCase__ : Any = math.ceil(val / multiple ) * multiple return x lowerCamelCase__ : int = (output_size, output_size) if isinstance(_A , _A ) else output_size lowerCamelCase__ , lowerCamelCase__ : int = get_image_size(_A ) lowerCamelCase__ , lowerCamelCase__ : List[str] = output_size # determine new height and width lowerCamelCase__ : int = output_height / input_height lowerCamelCase__ : int = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width lowerCamelCase__ : Tuple = scale_width else: # fit height lowerCamelCase__ : Optional[int] = scale_height lowerCamelCase__ : Any = constraint_to_multiple_of(scale_height * input_height , multiple=_A ) lowerCamelCase__ : Tuple = constraint_to_multiple_of(scale_width * input_width , multiple=_A ) return (new_height, new_width) class _lowercase ( lowercase__): """simple docstring""" A__ = ["pixel_values"] def __init__( self : Dict , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase : bool = False , __lowerCamelCase : int = 1 , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 255 , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , **__lowerCamelCase : List[str] , ): '''simple docstring''' super().__init__(**__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = size if size is not None else {"height": 384, "width": 384} lowerCamelCase__ : Any = get_size_dict(__lowerCamelCase ) lowerCamelCase__ : Any = do_resize lowerCamelCase__ : int = size lowerCamelCase__ : str = keep_aspect_ratio lowerCamelCase__ : Tuple = ensure_multiple_of lowerCamelCase__ : Union[str, Any] = resample lowerCamelCase__ : int = do_rescale lowerCamelCase__ : int = rescale_factor lowerCamelCase__ : List[Any] = do_normalize lowerCamelCase__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCamelCase__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCAmelCase ( self : Any , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, int] , __lowerCamelCase : bool = False , __lowerCamelCase : int = 1 , __lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = 
None , **__lowerCamelCase : Dict , ): '''simple docstring''' lowerCamelCase__ : str = get_size_dict(__lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" ) lowerCamelCase__ : List[Any] = get_resize_output_image_size( __lowerCamelCase , output_size=(size["height"], size["width"]) , keep_aspect_ratio=__lowerCamelCase , multiple=__lowerCamelCase , ) return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[int, float] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : Dict , ): '''simple docstring''' return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : np.ndarray , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Union[float, List[float]] , __lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase : int , ): '''simple docstring''' return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase ) def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : ImageInput , __lowerCamelCase : bool = None , __lowerCamelCase : int = None , __lowerCamelCase : bool = None , __lowerCamelCase : int = None , __lowerCamelCase : PILImageResampling = None , __lowerCamelCase : bool = None , __lowerCamelCase : float = None , __lowerCamelCase : bool = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[float, List[float]]] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCamelCase : List[Any] , ): '''simple docstring''' lowerCamelCase__ : str = do_resize if do_resize is not None else self.do_resize lowerCamelCase__ : Tuple = size if size is not None else self.size lowerCamelCase__ : Dict = get_size_dict(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio lowerCamelCase__ : Optional[Any] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of lowerCamelCase__ : int = resample if resample is not None else self.resample lowerCamelCase__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase__ : Optional[Any] = image_mean if image_mean is not None else self.image_mean lowerCamelCase__ : Dict = image_std if image_std is not None else self.image_std lowerCamelCase__ : List[Any] = make_list_of_images(__lowerCamelCase ) if not valid_images(__lowerCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. lowerCamelCase__ : Any = [to_numpy_array(__lowerCamelCase ) for image in images] if do_resize: lowerCamelCase__ : Tuple = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images] if do_rescale: lowerCamelCase__ : Dict = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images] if do_normalize: lowerCamelCase__ : Any = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images] lowerCamelCase__ : str = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images] lowerCamelCase__ : Optional[Any] = {"pixel_values": images} return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[Tuple] = None ): '''simple docstring''' lowerCamelCase__ : List[Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(__lowerCamelCase ): lowerCamelCase__ : Any = target_sizes.numpy() lowerCamelCase__ : Optional[Any] = [] for idx in range(len(__lowerCamelCase ) ): lowerCamelCase__ : List[str] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__lowerCamelCase ) else: lowerCamelCase__ : Union[str, Any] = logits.argmax(dim=1 ) lowerCamelCase__ : Any = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
5
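A sketch of the preprocessing above via the exported DPTImageProcessor: ensure_multiple_of snaps the resized height and width to a multiple of 32, which the DPT patch grid requires, while keep_aspect_ratio picks the smaller rescaling of the two axes:

import numpy as np
from transformers import DPTImageProcessor

processor = DPTImageProcessor(
    size={"height": 384, "width": 384}, keep_aspect_ratio=True, ensure_multiple_of=32
)
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # dummy HWC image
inputs = processor(images=image, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 3, H, W) with H and W multiples of 32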
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = KandinskyVaaImgaImgPipeline A__ = ["image_embeds", "negative_image_embeds", "image"] A__ = [ "image_embeds", "negative_image_embeds", "image", ] A__ = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] A__ = False @property def lowerCAmelCase ( self : Dict ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return 100 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : Optional[Any] = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } lowerCamelCase__ : Tuple = UNetaDConditionModel(**__lowerCamelCase ) return model @property def lowerCAmelCase ( self : int ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : int = VQModel(**self.dummy_movq_kwargs ) return model def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.dummy_unet lowerCamelCase__ : Optional[Any] = self.dummy_movq lowerCamelCase__ : Optional[int] = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_0_0_8_5, "beta_end": 0.0_1_2, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } lowerCamelCase__ : List[Any] = DDIMScheduler(**__lowerCamelCase ) lowerCamelCase__ : Tuple = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , 
__lowerCamelCase : int=0 ): '''simple docstring''' lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowerCamelCase ) # create init_image lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ : Optional[int] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) ) if str(__lowerCamelCase ).startswith("mps" ): lowerCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase ) else: lowerCamelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) lowerCamelCase__ : Tuple = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Dict = "cpu" lowerCamelCase__ : str = self.get_dummy_components() lowerCamelCase__ : Optional[int] = self.pipeline_class(**__lowerCamelCase ) lowerCamelCase__ : List[str] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) ) lowerCamelCase__ : List[str] = output.images lowerCamelCase__ : Optional[Any] = pipe( **self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0] lowerCamelCase__ : int = image[0, -3:, -3:, -1] lowerCamelCase__ : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase__ : str = np.array( [0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy" ) lowerCamelCase__ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) lowerCamelCase__ : Any = "A red cartoon frog, 4k" lowerCamelCase__ : str = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(__lowerCamelCase ) lowerCamelCase__ : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa ) lowerCamelCase__ : str = pipeline.to(__lowerCamelCase ) pipeline.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Tuple = 
torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase__ , lowerCamelCase__ : List[str] = pipe_prior( __lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() lowerCamelCase__ : Optional[Any] = pipeline( image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , ) lowerCamelCase__ : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
5
1
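A trimmed-down version of the flow the slow test above exercises (prior pipeline feeding the img2img decoder), assuming a CUDA device and the published kandinsky-community checkpoints; this is a sketch, not part of the test file:

import torch
from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
from diffusers.utils import load_image

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
decoder = KandinskyV22Img2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
image_embeds, negative_image_embeds = prior("A red cartoon frog, 4k").to_tuple()
image = decoder(
    image=init_image,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    height=768,
    width=768,
    strength=0.2,
).images[0]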
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
5
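The sliding-window product above can be sanity-checked on a toy input; this helper mirrors the same reduce trick with a configurable window size (the name max_window_product is mine, for illustration):

from functools import reduce


def max_window_product(digits: str, k: int) -> int:
    # Largest product of k adjacent digits, using the same string-based reduce.
    return max(
        int(reduce(lambda x, y: str(int(x) * int(y)), digits[i : i + k]))
        for i in range(len(digits) - k + 1)
    )


print(max_window_product("123454321", 4))  # 240, from the window 3*4*5*4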
def binary_xor(a: int, b: int) -> str:
    """Return the XOR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
5
1
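Quick checks of binary_xor as fixed above (leading zeros are kept because both operands are zero-filled to the same width):

print(binary_xor(25, 32))  # '0b111001'  since 25 ^ 32 == 57
print(binary_xor(37, 50))  # '0b010111'  since 37 ^ 50 == 23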
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
5
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
5
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) A : Optional[int] = { "configuration_speecht5": [ "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP", "SpeechT5Config", "SpeechT5HifiGanConfig", ], "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"], "processing_speecht5": ["SpeechT5Processor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[Any] = ["SpeechT5Tokenizer"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[str] = [ "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST", "SpeechT5ForSpeechToText", "SpeechT5ForSpeechToSpeech", "SpeechT5ForTextToSpeech", "SpeechT5Model", "SpeechT5PreTrainedModel", "SpeechT5HifiGan", ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys A : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
5
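The lazy module above wires up the public SpeechT5 classes; a typical text-to-speech entry point looks roughly like this (microsoft/speecht5_tts is the published checkpoint; the zero speaker embedding is a placeholder, real 512-dim x-vectors give usable audio):

import torch
from transformers import SpeechT5ForTextToSpeech, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")

inputs = processor(text="Hello, world!", return_tensors="pt")
speaker_embeddings = torch.zeros((1, 512))  # placeholder x-vector
spectrogram = model.generate_speech(inputs["input_ids"], speaker_embeddings)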
import os from datetime import datetime as dt from github import Github A : Union[str, Any] = [ "good first issue", "good second issue", "good difficult issue", "enhancement", "new pipeline/model", "new scheduler", "wip", ] def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : Optional[int] = Github(os.environ["GITHUB_TOKEN"] ) lowerCamelCase__ : str = g.get_repo("huggingface/diffusers" ) lowerCamelCase__ : Optional[int] = repo.get_issues(state="open" ) for issue in open_issues: lowerCamelCase__ : str = sorted(issue.get_comments() , key=lambda _A : i.created_at , reverse=_A ) lowerCamelCase__ : str = comments[0] if len(_A ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state="closed" ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state="open" ) issue.remove_from_labels("stale" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( "This issue has been automatically marked as stale because it has not had " "recent activity. If you think this still needs to be addressed " "please comment on this thread.\n\nPlease note that issues that do not follow the " "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) " "are likely to be ignored." ) issue.add_to_labels("stale" ) if __name__ == "__main__": main()
5
1
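The stale-bot row above keys every action off three time windows (30 days since creation, 23 days to notify, 7 days of silence after the bot's notification to close). A minimal, self-contained sketch of that decision rule follows, with plain integers standing in for the PyGithub issue fields, so the thresholds can be checked without hitting the GitHub API; the function name and its parameters are illustrative, not part of the script above.

def stale_action(days_inactive: int, days_old: int, bot_commented_last: bool, exempt: bool) -> str:
    # Mirrors the ordering of the branches in the bot: close first, then notify.
    if exempt or days_old < 30:
        return "keep"
    if bot_commented_last and days_inactive > 7:
        return "close"   # 7 days of silence after the Stalebot notification
    if days_inactive > 23:
        return "notify"  # post the stale notification and add the "stale" label
    return "keep"


assert stale_action(days_inactive=24, days_old=40, bot_commented_last=False, exempt=False) == "notify"
assert stale_action(days_inactive=8, days_old=40, bot_commented_last=True, exempt=False) == "close"
assert stale_action(days_inactive=8, days_old=40, bot_commented_last=True, exempt=True) == "keep"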
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """simple docstring"""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
5
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """simple docstring"""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
5
1
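The non-adjacent-sum row above carries its whole state in two rolling values: the best sum that includes the current element and the best that excludes it. A short worked trace of that recurrence on a concrete list (variable names are illustrative):

# Worked trace of the include/exclude recurrence on [3, 2, 7, 10]:
including, excluding = 3, 0          # seeded with nums[0]
for num in [2, 7, 10]:
    including, excluding = excluding + num, max(including, excluding)
    # after 2:  including=2,  excluding=3
    # after 7:  including=10, excluding=3
    # after 10: including=13, excluding=10
assert max(including, excluding) == 13   # picks the non-adjacent pair 3 + 10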
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    """simple docstring"""
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        """simple docstring"""
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
5
def is_automorphic_number(number: int) -> bool:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
5
1
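The digit loop in the row above compares n and n*n from the rightmost digit, so it accepts exactly the automorphic numbers (those whose square ends in the number itself). A string-based equivalent makes the property easy to spot-check; the helper name is illustrative:

def ends_with_self(n: int) -> bool:
    # n is automorphic iff the decimal expansion of n**2 ends in n.
    return str(n * n).endswith(str(n))


# 5**2 = 25, 6**2 = 36, 25**2 = 625, 76**2 = 5776
assert all(ends_with_self(n) for n in (0, 1, 5, 6, 25, 76))
assert not ends_with_self(7)  # 49 does not end in 7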
import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class _lowercase ( unittest.TestCase): """simple docstring""" def __init__( self : Any , __lowerCamelCase : int , __lowerCamelCase : str=13 , __lowerCamelCase : List[Any]=7 , __lowerCamelCase : str=True , __lowerCamelCase : int=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : Optional[int]=32 , __lowerCamelCase : Any=5 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Optional[Any]=37 , __lowerCamelCase : int="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : int=512 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : int=2 , __lowerCamelCase : List[Any]=0.0_2 , __lowerCamelCase : Optional[Any]=4 , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = parent lowerCamelCase__ : Optional[int] = batch_size lowerCamelCase__ : List[Any] = seq_length lowerCamelCase__ : str = is_training lowerCamelCase__ : Dict = use_attention_mask lowerCamelCase__ : List[str] = use_token_type_ids lowerCamelCase__ : Tuple = use_labels lowerCamelCase__ : List[str] = vocab_size lowerCamelCase__ : Optional[Any] = hidden_size lowerCamelCase__ : Optional[Any] = num_hidden_layers lowerCamelCase__ : List[Any] = num_attention_heads lowerCamelCase__ : Dict = intermediate_size lowerCamelCase__ : int = hidden_act lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : str = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : Dict = type_vocab_size lowerCamelCase__ : Optional[Any] = type_sequence_label_size lowerCamelCase__ : Any = initializer_range lowerCamelCase__ : Union[str, Any] = num_choices def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Dict = None if self.use_attention_mask: lowerCamelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : Optional[int] = None if self.use_token_type_ids: lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Optional[Any] = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : int = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : 
List[Any] = config_and_inputs lowerCamelCase__ : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = FlaxAlbertModelTester(self ) @slow def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: lowerCamelCase__ : Union[str, Any] = model_class_name.from_pretrained("albert-base-v2" ) lowerCamelCase__ : Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCamelCase ) @require_flax class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" ) lowerCamelCase__ : Optional[int] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowerCamelCase__ : List[str] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) lowerCamelCase__ : Union[str, Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0] lowerCamelCase__ : Tuple = (1, 11, 768) self.assertEqual(output.shape , __lowerCamelCase ) lowerCamelCase__ : Any = np.array( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1E-4 ) )
5
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) A : Optional[int] = { "configuration_speecht5": [ "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP", "SpeechT5Config", "SpeechT5HifiGanConfig", ], "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"], "processing_speecht5": ["SpeechT5Processor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[Any] = ["SpeechT5Tokenizer"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[str] = [ "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST", "SpeechT5ForSpeechToText", "SpeechT5ForSpeechToSpeech", "SpeechT5ForTextToSpeech", "SpeechT5Model", "SpeechT5PreTrainedModel", "SpeechT5HifiGan", ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys A : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
5
1
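The SpeechT5 `__init__` above gates its import structure on optional backends (sentencepiece, torch) and hands the result to a lazy module. A minimal sketch of that gating idea, assuming hypothetical submodules `configuration_x`/`modeling_x` and classes `XConfig`/`XModel` that are not part of the file above:

import importlib.util


def is_backend_available(name: str) -> bool:
    # Probe for an installed package; the same idea behind is_torch_available().
    return importlib.util.find_spec(name) is not None


# Config is always importable; torch-only modeling classes are added
# to the structure only when the backend is present, so `import package`
# stays cheap and missing backends degrade gracefully.
_import_structure = {"configuration_x": ["XConfig"]}
if is_backend_available("torch"):
    _import_structure["modeling_x"] = ["XModel"]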
import heapq as hq import math from collections.abc import Iterator class _lowercase : """simple docstring""" def __init__( self : List[str] , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : int = str(id_ ) lowerCamelCase__ : List[str] = None lowerCamelCase__ : Optional[Any] = None lowerCamelCase__ : Any = [] lowerCamelCase__ : Dict = {} # {vertex:distance} def __lt__( self : List[Any] , __lowerCamelCase : Dict ): '''simple docstring''' return self.key < other.key def __repr__( self : Optional[Any] ): '''simple docstring''' return self.id def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Any ): '''simple docstring''' self.neighbors.append(__lowerCamelCase ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : int ): '''simple docstring''' lowerCamelCase__ : List[Any] = weight def lowercase_ ( _A : str , _A : int , _A : Optional[int] , _A : List[str] ): """simple docstring""" graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , _A ) graph[b - 1].add_edge(graph[a - 1] , _A ) def lowercase_ ( _A : list , _A : Vertex ): """simple docstring""" lowerCamelCase__ : str = [] for u in graph: lowerCamelCase__ : str = math.inf lowerCamelCase__ : str = None lowerCamelCase__ : str = 0 lowerCamelCase__ : Union[str, Any] = graph[:] while q: lowerCamelCase__ : Any = min(_A ) q.remove(_A ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): lowerCamelCase__ : Dict = u lowerCamelCase__ : int = u.edges[v.id] for i in range(1 , len(_A ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def lowercase_ ( _A : list , _A : Vertex ): """simple docstring""" for u in graph: lowerCamelCase__ : Any = math.inf lowerCamelCase__ : str = None lowerCamelCase__ : Tuple = 0 lowerCamelCase__ : Tuple = list(_A ) hq.heapify(_A ) while h: lowerCamelCase__ : int = hq.heappop(_A ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): lowerCamelCase__ : Dict = u lowerCamelCase__ : int = u.edges[v.id] hq.heapify(_A ) for i in range(1 , len(_A ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def lowercase_ ( ): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
5
from __future__ import annotations import time import numpy as np A : Dict = [8, 5, 9, 7] A : Optional[Any] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] A : Any = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class _lowercase : """simple docstring""" def __init__( self : str , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ): '''simple docstring''' lowerCamelCase__ : int = claim_vector lowerCamelCase__ : str = allocated_resources_table lowerCamelCase__ : int = maximum_claim_table def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return {self.__need().index(__lowerCamelCase ): i for i in self.__need()} def lowerCAmelCase ( self : List[str] , **__lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.__need() lowerCamelCase__ : str = self.__allocated_resources_table lowerCamelCase__ : List[Any] = self.__available_resources() lowerCamelCase__ : str = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n" ) while need_list: lowerCamelCase__ : int = False for each_need in need_list: lowerCamelCase__ : Dict = True for index, need in enumerate(__lowerCamelCase ): if need > available_resources[index]: lowerCamelCase__ : str = False break if execution: lowerCamelCase__ : Tuple = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: lowerCamelCase__ : Any = original_need_index print(f"Process {process_number + 1} is executing." ) # remove the process run from stack need_list.remove(__lowerCamelCase ) # update available/freed resources stack lowerCamelCase__ : Union[str, Any] = np.array(__lowerCamelCase ) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(__lowerCamelCase ) for x in available_resources] ) ) break if safe: print("The process is in a safe state.\n" ) else: print("System in unsafe state. Aborting...\n" ) break def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' print(" " * 9 + "Allocated Resource Table" ) for item in self.__allocated_resources_table: print( f"P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print(" " * 9 + "System Resource Table" ) for item in self.__maximum_claim_table: print( f"P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) ) print( "Initial Available Resources: " + " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
5
1
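The Prim implementation in the row above maintains per-vertex keys and re-heapifies after every relaxation. A compact heap-based variant of the same algorithm on a plain adjacency map, for comparison (names and the dict-based graph encoding are illustrative, not taken from the row):

import heapq


def prim_total_weight(adj: dict[str, list[tuple[int, str]]], start: str) -> int:
    # adj maps a vertex to (weight, neighbor) pairs; undirected graph assumed.
    seen, heap, total = set(), [(0, start)], 0
    while heap:
        w, u = heapq.heappop(heap)
        if u in seen:
            continue  # stale heap entry for an already-included vertex
        seen.add(u)
        total += w
        for weight, v in adj[u]:
            if v not in seen:
                heapq.heappush(heap, (weight, v))
    return total


graph = {
    "a": [(1, "b"), (3, "c")],
    "b": [(1, "a"), (1, "c")],
    "c": [(3, "a"), (1, "b")],
}
assert prim_total_weight(graph, "a") == 2  # MST edges a-b (1) and b-c (1)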
from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax A : Tuple = logging.get_logger(__name__) @add_end_docstrings(lowercase__) class _lowercase ( lowercase__): """simple docstring""" def __init__( self : str , **__lowerCamelCase : List[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Dict , __lowerCamelCase : Union[str, List[str], "Image", List["Image"]] , **__lowerCamelCase : Optional[int] ): '''simple docstring''' return super().__call__(__lowerCamelCase , **__lowerCamelCase ) def lowerCAmelCase ( self : int , **__lowerCamelCase : Dict ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = {} if "candidate_labels" in kwargs: lowerCamelCase__ : Optional[Any] = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: lowerCamelCase__ : str = kwargs["hypothesis_template"] return preprocess_params, {}, {} def lowerCAmelCase ( self : str , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[Any]="This is a photo of {}." ): '''simple docstring''' lowerCamelCase__ : int = load_image(__lowerCamelCase ) lowerCamelCase__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) lowerCamelCase__ : List[Any] = candidate_labels lowerCamelCase__ : Any = [hypothesis_template.format(__lowerCamelCase ) for x in candidate_labels] lowerCamelCase__ : int = self.tokenizer(__lowerCamelCase , return_tensors=self.framework , padding=__lowerCamelCase ) lowerCamelCase__ : str = [text_inputs] return inputs def lowerCAmelCase ( self : int , __lowerCamelCase : int ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = model_inputs.pop("candidate_labels" ) lowerCamelCase__ : List[str] = model_inputs.pop("text_inputs" ) if isinstance(text_inputs[0] , __lowerCamelCase ): lowerCamelCase__ : Optional[Any] = text_inputs[0] else: # Batching case. 
lowerCamelCase__ : Union[str, Any] = text_inputs[0][0] lowerCamelCase__ : Optional[int] = self.model(**__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : List[Any] = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_image, } return model_outputs def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = model_outputs.pop("candidate_labels" ) lowerCamelCase__ : Optional[int] = model_outputs["logits"][0] if self.framework == "pt": lowerCamelCase__ : Tuple = logits.softmax(dim=-1 ).squeeze(-1 ) lowerCamelCase__ : List[str] = probs.tolist() if not isinstance(__lowerCamelCase , __lowerCamelCase ): lowerCamelCase__ : str = [scores] elif self.framework == "tf": lowerCamelCase__ : Tuple = stable_softmax(__lowerCamelCase , axis=-1 ) lowerCamelCase__ : Dict = probs.numpy().tolist() else: raise ValueError(f"Unsupported framework: {self.framework}" ) lowerCamelCase__ : str = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(__lowerCamelCase , __lowerCamelCase ) , key=lambda x : -x[0] ) ] return result
5
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = BarthezTokenizer A__ = BarthezTokenizerFast A__ = True A__ = True def lowerCAmelCase ( self : int ): '''simple docstring''' super().setUp() lowerCamelCase__ : List[str] = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Any = "<pad>" lowerCamelCase__ : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(__lowerCamelCase ) , 101122 ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowerCamelCase__ : str = [0, 57, 3018, 70307, 91, 2] lowerCamelCase__ : Tuple = self.tokenizer( __lowerCamelCase , max_length=len(__lowerCamelCase ) , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) lowerCamelCase__ : Any = batch.input_ids.tolist()[0] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCamelCase__ : Any = self.get_tokenizer() lowerCamelCase__ : Tuple = self.get_rust_tokenizer() lowerCamelCase__ : Union[str, Any] = "I was born in 92000, and this is falsé." 
lowerCamelCase__ : Dict = tokenizer.tokenize(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = rust_tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : List[str] = self.get_rust_tokenizer() lowerCamelCase__ : Optional[Any] = tokenizer.encode(__lowerCamelCase ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : int = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. lowerCamelCase__ : List[str] = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=__lowerCamelCase , )
5
1
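The postprocess step of the zero-shot pipeline above boils down to a softmax over the per-label logits followed by a descending sort into score/label dicts. A framework-agnostic numpy sketch of just that step (the function name is illustrative):

import numpy as np


def rank_labels(logits: np.ndarray, labels: list[str]) -> list[dict]:
    # Numerically stable softmax over the candidate labels, then sort by score.
    exp = np.exp(logits - logits.max())
    probs = exp / exp.sum()
    return [
        {"score": float(s), "label": l}
        for s, l in sorted(zip(probs, labels), key=lambda x: -x[0])
    ]


out = rank_labels(np.array([2.0, 0.5, 0.1]), ["cat", "dog", "car"])
assert out[0]["label"] == "cat"
assert abs(sum(r["score"] for r in out) - 1.0) < 1e-6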
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
5
import cva import numpy as np class _lowercase : """simple docstring""" def __init__( self : Union[str, Any] , __lowerCamelCase : float , __lowerCamelCase : int ): '''simple docstring''' if k in (0.0_4, 0.0_6): lowerCamelCase__ : int = k lowerCamelCase__ : List[str] = window_size else: raise ValueError("invalid k value" ) def __str__( self : str ): '''simple docstring''' return str(self.k ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : str ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = cva.imread(__lowerCamelCase , 0 ) lowerCamelCase__ , lowerCamelCase__ : Any = img.shape lowerCamelCase__ : list[list[int]] = [] lowerCamelCase__ : List[Any] = img.copy() lowerCamelCase__ : int = cva.cvtColor(__lowerCamelCase , cva.COLOR_GRAY2RGB ) lowerCamelCase__ , lowerCamelCase__ : int = np.gradient(__lowerCamelCase ) lowerCamelCase__ : Dict = dx**2 lowerCamelCase__ : Optional[Any] = dy**2 lowerCamelCase__ : int = dx * dy lowerCamelCase__ : Union[str, Any] = 0.0_4 lowerCamelCase__ : Any = self.window_size // 2 for y in range(__lowerCamelCase , h - offset ): for x in range(__lowerCamelCase , w - offset ): lowerCamelCase__ : Optional[Any] = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowerCamelCase__ : Optional[Any] = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowerCamelCase__ : str = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowerCamelCase__ : Optional[Any] = (wxx * wyy) - (wxy**2) lowerCamelCase__ : List[str] = wxx + wyy lowerCamelCase__ : List[Any] = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": A : Tuple = HarrisCorner(0.0_4, 3) A, A : Optional[int] = edge_detect.detect("path_to_image") cva.imwrite("detect.png", color_img)
5
1
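The inner loop of the Harris detector above scores each window with the response R = det(M) - k * trace(M)^2, where M is the 2x2 matrix of windowed gradient products (Ixx, Iyy, Ixy). The scalar core of that formula, isolated so the corner/edge behavior is visible (helper name illustrative):

import numpy as np


def harris_response(ixx: float, iyy: float, ixy: float, k: float = 0.04) -> float:
    # M = [[ixx, ixy], [ixy, iyy]] summed over the window;
    # large positive R marks a corner, negative R an edge, near-zero R flat.
    det = ixx * iyy - ixy**2
    trace = ixx + iyy
    return det - k * trace**2


# Strong gradients in both directions -> corner-like, positive response.
assert harris_response(10.0, 10.0, 0.0) > 0
# Gradient in only one direction (an edge) -> non-positive response.
assert harris_response(10.0, 0.0, 0.0) <= 0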
from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging A : Union[str, Any] = logging.get_logger(__name__) class _lowercase ( lowercase__): """simple docstring""" A__ = ["input_features", "attention_mask"] def __init__( self : str , __lowerCamelCase : str=80 , __lowerCamelCase : Optional[int]=16000 , __lowerCamelCase : Optional[int]=80 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=True , **__lowerCamelCase : Optional[int] , ): '''simple docstring''' super().__init__(feature_size=__lowerCamelCase , sampling_rate=__lowerCamelCase , padding_value=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Any = num_mel_bins lowerCamelCase__ : Any = do_ceptral_normalize lowerCamelCase__ : List[str] = normalize_means lowerCamelCase__ : List[Any] = normalize_vars lowerCamelCase__ : int = True def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : np.ndarray , ): '''simple docstring''' lowerCamelCase__ : int = waveform * (2**15) # Kaldi compliance: 16-bit signed integers lowerCamelCase__ : Dict = torch.from_numpy(__lowerCamelCase ).unsqueeze(0 ) lowerCamelCase__ : Optional[Any] = ta_kaldi.fbank(__lowerCamelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def lowerCAmelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : int , __lowerCamelCase : Optional[bool] = True , __lowerCamelCase : Optional[bool] = True , __lowerCamelCase : float = 0.0 , ): '''simple docstring''' if normalize_means: lowerCamelCase__ : int = x[:input_length].mean(axis=0 ) lowerCamelCase__ : int = np.subtract(__lowerCamelCase , __lowerCamelCase ) if normalize_vars: lowerCamelCase__ : int = x[:input_length].std(axis=0 ) lowerCamelCase__ : List[Any] = np.divide(__lowerCamelCase , __lowerCamelCase ) if input_length < x.shape[0]: lowerCamelCase__ : Optional[int] = padding_value # make sure array is in float32 lowerCamelCase__ : List[Any] = x.astype(np.floataa ) return x def lowerCAmelCase ( self : Any , __lowerCamelCase : List[np.ndarray] , __lowerCamelCase : Optional[np.ndarray] = None ): '''simple docstring''' lowerCamelCase__ : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(__lowerCamelCase , __lowerCamelCase , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(__lowerCamelCase , __lowerCamelCase ) ] def __call__( self : Dict , __lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCamelCase : Union[bool, str, PaddingStrategy] = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , **__lowerCamelCase : List[str] , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) lowerCamelCase__ : int = isinstance(__lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) lowerCamelCase__ : int = is_batched_numpy or ( isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase__ : Optional[Any] = [np.asarray(__lowerCamelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__lowerCamelCase , np.ndarray ): lowerCamelCase__ : List[Any] = np.asarray(__lowerCamelCase , dtype=np.floataa ) elif isinstance(__lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCamelCase__ : List[str] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase__ : Dict = [raw_speech] # extract fbank features lowerCamelCase__ : Dict = [self._extract_fbank_features(__lowerCamelCase ) for waveform in raw_speech] # convert into correct format for padding lowerCamelCase__ : Optional[Any] = BatchFeature({"input_features": features} ) lowerCamelCase__ : Dict = self.pad( __lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , ) # make sure list is in array format lowerCamelCase__ : Tuple = padded_inputs.get("input_features" ) if isinstance(input_features[0] , __lowerCamelCase ): lowerCamelCase__ : List[str] = [np.asarray(__lowerCamelCase , dtype=np.floataa ) for feature in input_features] lowerCamelCase__ : Optional[int] = padded_inputs.get("attention_mask" ) if attention_mask is not None: lowerCamelCase__ : Tuple = [np.asarray(__lowerCamelCase , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: lowerCamelCase__ : Any = ( np.array(__lowerCamelCase , dtype=np.intaa ) if self._get_padding_strategies(__lowerCamelCase , max_length=__lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) lowerCamelCase__ : List[Any] = self.normalize( padded_inputs["input_features"] , attention_mask=__lowerCamelCase ) if return_tensors is not None: lowerCamelCase__ : Optional[Any] = padded_inputs.convert_to_tensors(__lowerCamelCase ) return padded_inputs
5
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _lowercase : """simple docstring""" def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=13 , __lowerCamelCase : Dict=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Optional[Any]=36 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Union[str, Any]=6 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Dict=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[Any]=512 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Dict=None , ): '''simple docstring''' lowerCamelCase__ : Dict = parent lowerCamelCase__ : List[Any] = batch_size lowerCamelCase__ : Any = seq_length lowerCamelCase__ : List[str] = is_training lowerCamelCase__ : int = use_input_mask lowerCamelCase__ : List[str] = use_token_type_ids lowerCamelCase__ : int = use_labels lowerCamelCase__ : Dict = vocab_size lowerCamelCase__ : List[Any] = embedding_size lowerCamelCase__ : Dict = hidden_size lowerCamelCase__ : Any = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_hidden_groups lowerCamelCase__ : Optional[int] = num_attention_heads lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob lowerCamelCase__ : Optional[int] = max_position_embeddings lowerCamelCase__ : List[Any] = type_vocab_size lowerCamelCase__ : Optional[Any] = type_sequence_label_size lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : str = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Any = scope def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[int] = None if self.use_input_mask: lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Tuple = None lowerCamelCase__ : List[str] = None lowerCamelCase__ : int = None if self.use_labels: lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , 
self.num_labels ) lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : str ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : int = AlbertModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Any = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Any = AlbertForPreTraining(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Union[str, Any] = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , sentence_order_label=__lowerCamelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Dict = AlbertForMaskedLM(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : int ): '''simple docstring''' lowerCamelCase__ : str = AlbertForQuestionAnswering(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : str = model( __lowerCamelCase , 
attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ): '''simple docstring''' lowerCamelCase__ : int = self.num_labels lowerCamelCase__ : Optional[int] = AlbertForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.num_labels lowerCamelCase__ : List[str] = AlbertForTokenClassification(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.num_choices lowerCamelCase__ : Optional[int] = AlbertForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : int = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : int = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Union[str, Any] = config_and_inputs lowerCamelCase__ : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) A__ = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, 
"question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) A__ = True def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : Any = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class in get_values(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase ) lowerCamelCase__ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = AlbertModelTester(self ) lowerCamelCase__ : Optional[Any] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase__ : Dict = type self.model_tester.create_and_check_model(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : List[str] = AlbertModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = AlbertModel.from_pretrained("albert-base-v2" ) lowerCamelCase__ : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowerCamelCase__ : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCamelCase__ : List[Any] = model(__lowerCamelCase , 
attention_mask=__lowerCamelCase )[0] lowerCamelCase__ : Tuple = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) lowerCamelCase__ : Dict = torch.tensor( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1E-4 ) )
5
1
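The feature extractor in the row above applies utterance-level cepstral mean and variance normalization: statistics are computed only over the unpadded frames, then the padding value is re-imposed. A minimal numpy sketch of that step, assuming a hypothetical standalone `utterance_cmvn` with the same shape conventions (frames x mel bins):

import numpy as np


def utterance_cmvn(x: np.ndarray, input_length: int, padding_value: float = 0.0) -> np.ndarray:
    # Mean/variance statistics come only from the real (unpadded) frames.
    mean = x[:input_length].mean(axis=0)
    std = x[:input_length].std(axis=0)
    x = (x - mean) / std
    x[input_length:] = padding_value   # re-impose padding after normalization
    return x.astype(np.float32)


feats = np.vstack([np.random.randn(20, 80) * 3 + 5, np.zeros((4, 80))])
norm = utterance_cmvn(feats, input_length=20)
assert abs(norm[:20].mean()) < 1e-4 and abs(norm[:20].std() - 1.0) < 1e-4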
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = tempfile.mkdtemp() lowerCamelCase__ : Optional[int] = BlipImageProcessor() lowerCamelCase__ : Tuple = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" ) lowerCamelCase__ : Optional[Any] = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" ) lowerCamelCase__ : List[str] = InstructBlipProcessor(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : List[str] , **__lowerCamelCase : Dict ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer def lowerCAmelCase ( self : Dict , **__lowerCamelCase : Dict ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor def lowerCAmelCase ( self : Optional[int] , **__lowerCamelCase : int ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).qformer_tokenizer def lowerCAmelCase ( self : Dict ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCamelCase__ : Optional[int] = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase__ : str = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowerCamelCase__ : str = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 ) lowerCamelCase__ : Union[str, Any] = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCamelCase ) self.assertIsInstance(processor.qformer_tokenizer , __lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.get_image_processor() lowerCamelCase__ : str = self.get_tokenizer() lowerCamelCase__ : Optional[int] = self.get_qformer_tokenizer() lowerCamelCase__ : Dict = InstructBlipProcessor( tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = self.prepare_image_inputs() lowerCamelCase__ : Union[str, Any] = image_processor(__lowerCamelCase , return_tensors="np" ) lowerCamelCase__ : 
Optional[int] = processor(images=__lowerCamelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : str = self.get_image_processor() lowerCamelCase__ : Dict = self.get_tokenizer() lowerCamelCase__ : Any = self.get_qformer_tokenizer() lowerCamelCase__ : int = InstructBlipProcessor( tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase ) lowerCamelCase__ : str = "lower newer" lowerCamelCase__ : Optional[int] = processor(text=__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) lowerCamelCase__ : List[Any] = qformer_tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : int = self.get_image_processor() lowerCamelCase__ : Tuple = self.get_tokenizer() lowerCamelCase__ : List[str] = self.get_qformer_tokenizer() lowerCamelCase__ : Dict = InstructBlipProcessor( tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase ) lowerCamelCase__ : Dict = "lower newer" lowerCamelCase__ : List[str] = self.prepare_image_inputs() lowerCamelCase__ : List[str] = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual( list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.get_image_processor() lowerCamelCase__ : Optional[Any] = self.get_tokenizer() lowerCamelCase__ : Optional[Any] = self.get_qformer_tokenizer() lowerCamelCase__ : str = InstructBlipProcessor( tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__ : int = processor.batch_decode(__lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer.batch_decode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Any = self.get_image_processor() lowerCamelCase__ : Dict = self.get_tokenizer() lowerCamelCase__ : Union[str, Any] = self.get_qformer_tokenizer() lowerCamelCase__ : str = InstructBlipProcessor( tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase , qformer_tokenizer=__lowerCamelCase ) lowerCamelCase__ : Dict = "lower newer" lowerCamelCase__ : Union[str, Any] = self.prepare_image_inputs() lowerCamelCase__ : Any = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual( list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
5
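A minimal usage sketch of the processor pattern the test suite above exercises, assuming network access to the Hugging Face Hub; the tiny checkpoint names come from the test itself, but I use the real public class names (GPT2Tokenizer rather than the sample's renamed GPTaTokenizer), and the prompt text is illustrative.

# Sketch: build an InstructBLIP-style processor from the tiny test checkpoints
# and run it on a random image plus a text prompt.
import numpy as np
from PIL import Image
from transformers import BertTokenizerFast, BlipImageProcessor, GPT2Tokenizer, InstructBlipProcessor

image_processor = BlipImageProcessor()
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")
processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

image = Image.fromarray(np.random.randint(0, 255, (30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="np")
# The processor routes text through both tokenizers and the image through the
# image processor, so five keys come back (as the test above asserts):
print(sorted(inputs.keys()))
# ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']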
import os def lowercase_ ( _A : str = "input.txt" ): """simple docstring""" with open(os.path.join(os.path.dirname(_A ) , _A ) ) as input_file: lowerCamelCase__ : List[Any] = [ [int(_A ) for element in line.split("," )] for line in input_file.readlines() ] lowerCamelCase__ : Optional[Any] = len(_A ) lowerCamelCase__ : Union[str, Any] = len(matrix[0] ) lowerCamelCase__ : Union[str, Any] = [[-1 for _ in range(_A )] for _ in range(_A )] for i in range(_A ): lowerCamelCase__ : Optional[Any] = matrix[i][0] for j in range(1 , _A ): for i in range(_A ): lowerCamelCase__ : int = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , _A ): lowerCamelCase__ : Tuple = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): lowerCamelCase__ : str = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(f'{solution() = }')
5
1
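The sample above is a three-pass dynamic programme (initialise the left column, then for each new column take the best of entering from the left, relaxing downwards, and relaxing upwards). It can be checked against the small grid from Project Euler problem 82; the 5x5 matrix and the expected minimum of 994 are the published example, the helper name is mine.

def min_path_sum(matrix):
    # dp[i] = cheapest way to reach row i of the current column
    rows = len(matrix)
    dp = [row[0] for row in matrix]
    for j in range(1, len(matrix[0])):
        dp = [dp[i] + matrix[i][j] for i in range(rows)]   # enter from the left
        for i in range(1, rows):                           # relax downwards
            dp[i] = min(dp[i], dp[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):                  # relax upwards
            dp[i] = min(dp[i], dp[i + 1] + matrix[i][j])
    return min(dp)

example = [
    [131, 673, 234, 103, 18],
    [201, 96, 342, 965, 150],
    [630, 803, 746, 422, 111],
    [537, 699, 497, 121, 956],
    [805, 732, 524, 37, 331],
]
assert min_path_sum(example) == 994  # 201 -> 96 -> 342 -> 234 -> 103 -> 18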
from math import pi, sqrt def lowercase_ ( _A : float ): """simple docstring""" if num <= 0: raise ValueError("math domain error" ) if num > 171.5: raise OverflowError("math range error" ) elif num - int(_A ) not in (0, 0.5): raise NotImplementedError("num must be an integer or a half-integer" ) elif num == 0.5: return sqrt(pi ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def lowercase_ ( ): """simple docstring""" assert gamma(0.5 ) == sqrt(pi ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() A : Dict = 1.0 while num: A : List[str] = float(input("Gamma of: ")) print(f'gamma({num}) = {gamma(num)}') print("\nEnter 0 to exit...")
5
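For reference, a de-scrambled sketch of the half-integer gamma recursion the sample implements: Γ(1/2) = √π is the base case for half-integers, Γ(1) = 1 for integers, and Γ(n) = (n−1)·Γ(n−1) extends both. The function mirrors the sample's logic; the asserts are mine.

from math import isclose, pi, sqrt

def gamma(num: float) -> float:
    # Gamma restricted to positive integers and half-integers.
    if num <= 0:
        raise ValueError("math domain error")
    if num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    if num == 0.5:
        return sqrt(pi)  # Gamma(1/2) = sqrt(pi)
    return 1.0 if num == 1 else (num - 1) * gamma(num - 1)

assert gamma(1) == gamma(2) == 1.0
assert gamma(4) == 6.0                       # Gamma(4) = 3!
assert isclose(gamma(2.5), 0.75 * sqrt(pi))  # (3/2)(1/2)sqrt(pi)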
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py A : Tuple = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n" A : Optional[int] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n" A : str = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _lowercase ( datasets.Metric): """simple docstring""" def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[ "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : str = compute_bleu( reference_corpus=__lowerCamelCase , translation_corpus=__lowerCamelCase , max_order=__lowerCamelCase , smooth=__lowerCamelCase ) ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : List[str] = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
5
1
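The metric wraps TensorFlow's reference compute_bleu, so its own docstring example can be run directly; a minimal sketch, assuming a datasets version that still ships load_metric (it was deprecated and later removed in favour of the evaluate library).

import datasets  # requires a datasets release that still provides load_metric

predictions = [["hello", "there", "general", "kenobi"], ["foo", "bar", "foobar"]]
references = [
    [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],
    [["foo", "bar", "foobar"]],
]
bleu = datasets.load_metric("bleu")
results = bleu.compute(predictions=predictions, references=references)
print(results["bleu"])  # 1.0 -- every prediction matches a reference exactly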
from math import pi, sqrt, tan def lowercase_ ( _A : float ): """simple docstring""" if side_length < 0: raise ValueError("surface_area_cube() only accepts non-negative values" ) return 6 * side_length**2 def lowercase_ ( _A : float , _A : float , _A : float ): """simple docstring""" if length < 0 or breadth < 0 or height < 0: raise ValueError("surface_area_cuboid() only accepts non-negative values" ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def lowercase_ ( _A : float ): """simple docstring""" if radius < 0: raise ValueError("surface_area_sphere() only accepts non-negative values" ) return 4 * pi * radius**2 def lowercase_ ( _A : float ): """simple docstring""" if radius < 0: raise ValueError("surface_area_hemisphere() only accepts non-negative values" ) return 3 * pi * radius**2 def lowercase_ ( _A : float , _A : float ): """simple docstring""" if radius < 0 or height < 0: raise ValueError("surface_area_cone() only accepts non-negative values" ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def lowercase_ ( _A : float , _A : float , _A : float ): """simple docstring""" if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( "surface_area_conical_frustum() only accepts non-negative values" ) lowerCamelCase__ : Optional[Any] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def lowercase_ ( _A : float , _A : float ): """simple docstring""" if radius < 0 or height < 0: raise ValueError("surface_area_cylinder() only accepts non-negative values" ) return 2 * pi * radius * (height + radius) def lowercase_ ( _A : float , _A : float ): """simple docstring""" if torus_radius < 0 or tube_radius < 0: raise ValueError("surface_area_torus() only accepts non-negative values" ) if torus_radius < tube_radius: raise ValueError( "surface_area_torus() does not support spindle or self intersecting tori" ) return 4 * pow(_A , 2 ) * torus_radius * tube_radius def lowercase_ ( _A : float , _A : float ): """simple docstring""" if length < 0 or width < 0: raise ValueError("area_rectangle() only accepts non-negative values" ) return length * width def lowercase_ ( _A : float ): """simple docstring""" if side_length < 0: raise ValueError("area_square() only accepts non-negative values" ) return side_length**2 def lowercase_ ( _A : float , _A : float ): """simple docstring""" if base < 0 or height < 0: raise ValueError("area_triangle() only accepts non-negative values" ) return (base * height) / 2 def lowercase_ ( _A : float , _A : float , _A : float ): """simple docstring""" if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError("area_triangle_three_sides() only accepts non-negative values" ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError("Given three sides do not form a triangle" ) lowerCamelCase__ : List[str] = (sidea + sidea + sidea) / 2 lowerCamelCase__ : List[Any] = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def lowercase_ ( _A : float , _A : float ): """simple docstring""" if base < 0 or height < 0: raise ValueError("area_parallelogram() only accepts non-negative values" ) return base * height def lowercase_ ( _A : float , _A : float , _A : float ): """simple docstring""" if basea < 0 or basea < 0 or height < 0: raise ValueError("area_trapezium() only accepts non-negative values" ) return 1 / 2 * (basea + basea) * height def lowercase_ ( _A : float ): 
"""simple docstring""" if radius < 0: raise ValueError("area_circle() only accepts non-negative values" ) return pi * radius**2 def lowercase_ ( _A : float , _A : float ): """simple docstring""" if radius_x < 0 or radius_y < 0: raise ValueError("area_ellipse() only accepts non-negative values" ) return pi * radius_x * radius_y def lowercase_ ( _A : float , _A : float ): """simple docstring""" if diagonal_a < 0 or diagonal_a < 0: raise ValueError("area_rhombus() only accepts non-negative values" ) return 1 / 2 * diagonal_a * diagonal_a def lowercase_ ( _A : int , _A : float ): """simple docstring""" if not isinstance(_A , _A ) or sides < 3: raise ValueError( "area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides" ) elif length < 0: raise ValueError( "area_reg_polygon() only accepts non-negative values as \ length of a side" ) return (sides * length**2) / (4 * tan(pi / sides )) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print("[DEMO] Areas of various geometric shapes: \n") print(f'Rectangle: {area_rectangle(10, 20) = }') print(f'Square: {area_square(10) = }') print(f'Triangle: {area_triangle(10, 10) = }') print(f'Triangle: {area_triangle_three_sides(5, 12, 13) = }') print(f'Parallelogram: {area_parallelogram(10, 20) = }') print(f'Rhombus: {area_rhombus(10, 20) = }') print(f'Trapezium: {area_trapezium(10, 20, 30) = }') print(f'Circle: {area_circle(20) = }') print(f'Ellipse: {area_ellipse(10, 20) = }') print("\nSurface Areas of various geometric shapes: \n") print(f'Cube: {surface_area_cube(20) = }') print(f'Cuboid: {surface_area_cuboid(10, 20, 30) = }') print(f'Sphere: {surface_area_sphere(20) = }') print(f'Hemisphere: {surface_area_hemisphere(20) = }') print(f'Cone: {surface_area_cone(10, 20) = }') print(f'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }') print(f'Cylinder: {surface_area_cylinder(10, 20) = }') print(f'Torus: {surface_area_torus(20, 10) = }') print(f'Equilateral Triangle: {area_reg_polygon(3, 10) = }') print(f'Square: {area_reg_polygon(4, 10) = }') print(f'Reqular Pentagon: {area_reg_polygon(5, 10) = }')
5
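One of the less obvious formulas above is Heron's: for sides a, b, c with semi-perimeter s = (a + b + c) / 2, the area is sqrt(s(s−a)(s−b)(s−c)). For the (5, 12, 13) right triangle used in the demo, s = 15 and sqrt(15·10·3·2) = sqrt(900) = 30, matching the right-triangle formula 5·12/2. A quick standalone check:

from math import sqrt

a, b, c = 5, 12, 13
s = (a + b + c) / 2                            # semi-perimeter = 15
heron = sqrt(s * (s - a) * (s - b) * (s - c))  # sqrt(900) = 30
assert heron == a * b / 2 == 30                # right triangle: legs 5 and 12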
import sys import webbrowser import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": print("Googling.....") A : str = "https://www.google.com/search?q=" + " ".join(sys.argv[1:]) A : Optional[int] = requests.get(url, headers={"User-Agent": UserAgent().random}) # res.raise_for_status() with open("project1a.html", "wb") as out_file: # only for knowing the class for data in res.iter_content(10000): out_file.write(data) A : int = BeautifulSoup(res.text, "html.parser") A : Any = list(soup.select(".eZt8xd"))[:5] print(len(links)) for link in links: if link.text == "Maps": webbrowser.open(link.get("href")) else: webbrowser.open(f'https://google.com{link.get("href")}')
5
1
import os from datetime import datetime as dt from github import Github A : Union[str, Any] = [ "good first issue", "good second issue", "good difficult issue", "enhancement", "new pipeline/model", "new scheduler", "wip", ] def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : Optional[int] = Github(os.environ["GITHUB_TOKEN"] ) lowerCamelCase__ : str = g.get_repo("huggingface/diffusers" ) lowerCamelCase__ : Optional[int] = repo.get_issues(state="open" ) for issue in open_issues: lowerCamelCase__ : str = sorted(issue.get_comments() , key=lambda _A : i.created_at , reverse=_A ) lowerCamelCase__ : str = comments[0] if len(_A ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state="closed" ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state="open" ) issue.remove_from_labels("stale" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( "This issue has been automatically marked as stale because it has not had " "recent activity. If you think this still needs to be addressed " "please comment on this thread.\n\nPlease note that issues that do not follow the " "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) " "are likely to be ignored." ) issue.add_to_labels("stale" ) if __name__ == "__main__": main()
5
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : Dict = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" ) lowerCamelCase__ : str = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" lowerCamelCase__ : Any = model(__lowerCamelCase )["last_hidden_state"] lowerCamelCase__ : List[str] = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice. lowerCamelCase__ : str = tf.convert_to_tensor( [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
5
1
import heapq import sys import numpy as np A : List[Any] = tuple[int, int] class _lowercase : """simple docstring""" def __init__( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = [] lowerCamelCase__ : Any = set() def lowerCAmelCase ( self : Dict ): '''simple docstring''' if not self.empty(): return self.elements[0][0] else: return float("inf" ) def lowerCAmelCase ( self : Any ): '''simple docstring''' return len(self.elements ) == 0 def lowerCAmelCase ( self : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str ): '''simple docstring''' if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(__lowerCamelCase ) else: # update # print("update", item) lowerCamelCase__ : Tuple = [] ((lowerCamelCase__) , (lowerCamelCase__)) : Tuple = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((lowerCamelCase__) , (lowerCamelCase__)) : Union[str, Any] = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Dict ): '''simple docstring''' if item in self.set: self.set.remove(__lowerCamelCase ) lowerCamelCase__ : str = [] ((lowerCamelCase__) , (lowerCamelCase__)) : Optional[int] = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((lowerCamelCase__) , (lowerCamelCase__)) : Optional[Any] = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return self.elements[0][1] def lowerCAmelCase ( self : int ): '''simple docstring''' ((lowerCamelCase__) , (lowerCamelCase__)) : Tuple = heapq.heappop(self.elements ) self.set.remove(__lowerCamelCase ) return (priority, item) def lowercase_ ( _A : TPos , _A : TPos ): """simple docstring""" lowerCamelCase__ : Dict = np.array(_A ) lowerCamelCase__ : str = np.array(_A ) return np.linalg.norm(a - b ) def lowercase_ ( _A : TPos , _A : TPos ): """simple docstring""" return consistent_heuristic(_A , _A ) // t def lowercase_ ( _A : TPos , _A : TPos ): """simple docstring""" return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def lowercase_ ( _A : TPos , _A : int , _A : TPos , _A : dict[TPos, float] ): """simple docstring""" lowerCamelCase__ : Optional[int] = g_function[start] + Wa * heuristics[i](_A , _A ) return ans def lowercase_ ( _A : Dict , _A : Dict , _A : Optional[int] ): """simple docstring""" lowerCamelCase__ : List[str] = np.chararray((n, n) ) for i in range(_A ): for j in range(_A ): lowerCamelCase__ : Any = "*" for i in range(_A ): for j in range(_A ): if (j, (n - 1) - i) in blocks: lowerCamelCase__ : Optional[Any] = "#" lowerCamelCase__ : Union[str, Any] = "-" lowerCamelCase__ : Union[str, Any] = back_pointer[goal] while x != start: ((lowerCamelCase__) , (lowerCamelCase__)) : str = x # print(x) lowerCamelCase__ : int = "-" lowerCamelCase__ : Dict = back_pointer[x] lowerCamelCase__ : Union[str, Any] = "-" for i in range(_A ): for j in range(_A ): if (i, j) == (0, n - 1): print(grid[i][j] , end=" " ) print("<-- End position" , end=" " ) else: print(grid[i][j] , end=" " ) print() print("^" ) print("Start position" ) print() print("# is an obstacle" ) print("- is the path taken by algorithm" ) print("PATH TAKEN BY THE ALGORITHM IS:-" ) lowerCamelCase__ : Optional[int] = back_pointer[goal] while x != start: print(_A , end=" " ) lowerCamelCase__ : Any = back_pointer[x] print(_A ) sys.exit() def lowercase_ ( _A : 
TPos ): """simple docstring""" if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def lowercase_ ( _A : List[str] , _A : List[str] , _A : Any , _A : Any , _A : Optional[Any] , _A : List[str] , _A : Dict , _A : int , ): """simple docstring""" for itera in range(_A ): open_list[itera].remove_element(_A ) # print("s", s) # print("j", j) ((lowerCamelCase__) , (lowerCamelCase__)) : Union[str, Any] = s lowerCamelCase__ : Any = (x - 1, y) lowerCamelCase__ : List[Any] = (x + 1, y) lowerCamelCase__ : Any = (x, y + 1) lowerCamelCase__ : List[str] = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(_A ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(_A ) lowerCamelCase__ : Optional[Any] = -1 lowerCamelCase__ : Optional[Any] = float("inf" ) if valid(_A ) and g_function[neighbours] > g_function[s] + 1: lowerCamelCase__ : Tuple = g_function[s] + 1 lowerCamelCase__ : List[Any] = s if neighbours not in close_list_anchor: open_list[0].put(_A , key(_A , 0 , _A , _A ) ) if neighbours not in close_list_inad: for var in range(1 , _A ): if key(_A , _A , _A , _A ) <= Wa * key( _A , 0 , _A , _A ): open_list[j].put( _A , key(_A , _A , _A , _A ) ) def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : Optional[int] = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list A : Optional[int] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} A : Union[str, Any] = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] A : Optional[Any] = make_common_ground() A : Optional[Any] = blocks_blk # hyper parameters A : List[Any] = 1 A : str = 1 A : Tuple = 20 A : int = 3 # one consistent and two other inconsistent # start and end destination A : str = (0, 0) A : Union[str, Any] = (n - 1, n - 1) A : Tuple = 1 def lowercase_ ( _A : TPos , _A : TPos , _A : int ): """simple docstring""" lowerCamelCase__ : Union[str, Any] = {start: 0, goal: float("inf" )} lowerCamelCase__ : List[Any] = {start: -1, goal: -1} lowerCamelCase__ : Union[str, Any] = [] lowerCamelCase__ : str = set() for i in range(_A ): open_list.append(PriorityQueue() ) open_list[i].put(_A , key(_A , _A , _A , _A ) ) lowerCamelCase__ : list[int] = [] lowerCamelCase__ : list[int] = [] while open_list[0].minkey() < float("inf" ): for i in range(1 , _A ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float("inf" ): do_something(_A , _A , _A ) else: lowerCamelCase__ , lowerCamelCase__ : List[str] = open_list[i].top_show() visited.add(_A ) expand_state( _A , _A , _A , _A , _A , _A , _A , _A , ) close_list_inad.append(_A ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float("inf" ): do_something(_A , _A , _A ) else: lowerCamelCase__ : Any = open_list[0].top_show() visited.add(_A ) expand_state( _A , 0 , _A , _A , _A , _A , _A , _A , ) close_list_anchor.append(_A ) print("No path found to goal" ) print() for i in range(n - 1 , -1 , 
-1 ): for j in range(_A ): if (j, i) in blocks: print("#" , end=" " ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print("*" , end=" " ) else: print("-" , end=" " ) else: print("*" , end=" " ) if (j, i) == (n - 1, n - 1): print("<-- End position" , end=" " ) print() print("^" ) print("Start position" ) print() print("# is an obstacle" ) print("- is the path taken by algorithm" ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
5
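The multi-heuristic A* scheduler above expands from an inadmissible queue i only while open_list[i].minkey() stays within a factor (the second weight, rendered as Wa by the renaming) of the anchor queue's minkey. A small sketch of the three heuristics and the priority key f(s) = g(s) + W1·h(s), using plain names in place of the renamed ones; the values printed are for the grid's own start/goal.

from math import hypot

W1, t = 1, 20

def consistent(p, goal):      # Euclidean distance (admissible anchor heuristic)
    return hypot(p[0] - goal[0], p[1] - goal[1])

def inadmissible_1(p, goal):  # Euclidean // t, a coarsened variant
    return consistent(p, goal) // t

def inadmissible_2(p, goal):  # Manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])

def key(state, h, goal, g):   # f(s) = g(s) + W1 * h(s)
    return g[state] + W1 * h(state, goal)

start, goal = (0, 0), (19, 19)
g = {start: 0}
for h in (consistent, inadmissible_1, inadmissible_2):
    print(h.__name__, key(start, h, goal, g))
# consistent ~26.87, inadmissible_1 1.0, inadmissible_2 38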
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging A : List[Any] = logging.get_logger(__name__) A : Any = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class _lowercase ( lowercase__): """simple docstring""" A__ = "blenderbot-small" A__ = ["past_key_values"] A__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Dict , __lowerCamelCase : List[str]=50265 , __lowerCamelCase : str=512 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : str=2048 , __lowerCamelCase : str=16 , __lowerCamelCase : List[Any]=8 , __lowerCamelCase : Any=2048 , __lowerCamelCase : List[str]=16 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Dict=False , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=2 , **__lowerCamelCase : int , ): '''simple docstring''' lowerCamelCase__ : str = vocab_size lowerCamelCase__ : Union[str, Any] = max_position_embeddings lowerCamelCase__ : Union[str, Any] = d_model lowerCamelCase__ : Optional[int] = encoder_ffn_dim lowerCamelCase__ : Dict = encoder_layers lowerCamelCase__ : Any = encoder_attention_heads lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim lowerCamelCase__ : str = decoder_layers lowerCamelCase__ : Optional[Any] = decoder_attention_heads lowerCamelCase__ : List[str] = dropout lowerCamelCase__ : List[Any] = attention_dropout lowerCamelCase__ : Dict = activation_dropout lowerCamelCase__ : Optional[Any] = activation_function lowerCamelCase__ : Dict = init_std lowerCamelCase__ : List[str] = encoder_layerdrop lowerCamelCase__ : Dict = decoder_layerdrop lowerCamelCase__ : int = use_cache lowerCamelCase__ : List[Any] = encoder_layers lowerCamelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , ) class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : int = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ : Union[str, Any] = {0: "batch"} lowerCamelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowerCamelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"} lowerCamelCase__ : str = {0: "batch", 1: "decoder_sequence"} if self.use_past: 
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. lowerCamelCase__ : Tuple = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"} else: lowerCamelCase__ : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Union[str, Any] = super().outputs else: lowerCamelCase__ : int = super(__lowerCamelCase , self ).outputs if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Any = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowerCAmelCase ( self : int , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Generate decoder inputs lowerCamelCase__ : List[str] = seq_length if not self.use_past else 1 lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} lowerCamelCase__ : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : Tuple = common_inputs["input_ids"].shape lowerCamelCase__ : int = common_inputs["decoder_input_ids"].shape[1] lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.num_attention_heads lowerCamelCase__ : str = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Optional[int] = decoder_seq_length + 3 lowerCamelCase__ : Dict = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCamelCase__ : List[Any] = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Optional[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers lowerCamelCase__ : Union[str, Any] = min(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers lowerCamelCase__ : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), ) ) # TODO: test this. lowerCamelCase__ : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__lowerCamelCase , __lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) ) return common_inputs def lowerCAmelCase ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : int = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCamelCase__ : str = seqlen + 2 lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.num_layers lowerCamelCase__ , lowerCamelCase__ : int = self.num_attention_heads lowerCamelCase__ : Tuple = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Union[str, Any] = common_inputs["attention_mask"].dtype lowerCamelCase__ : List[str] = torch.cat( [common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Tuple = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase ) ] return common_inputs def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(__lowerCamelCase ) lowerCamelCase__ : Dict = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence lowerCamelCase__ : Optional[int] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCamelCase__ : Optional[Any] = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) elif self.task == "causal-lm": lowerCamelCase__ : Any = self._generate_dummy_inputs_for_causal_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) else: lowerCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Dict = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) else: lowerCamelCase__ : int = super(__lowerCamelCase , self )._flatten_past_key_values_( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
5
1
def lowercase_ ( _A : int ): """simple docstring""" lowerCamelCase__ : Dict = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def lowercase_ ( _A : int = 5000 ): """simple docstring""" lowerCamelCase__ : Optional[int] = [(i * (3 * i - 1)) // 2 for i in range(1 , _A )] for i, pentagonal_i in enumerate(_A ): for j in range(_A , len(_A ) ): lowerCamelCase__ : str = pentagonal_nums[j] lowerCamelCase__ : Any = pentagonal_i + pentagonal_j lowerCamelCase__ : Tuple = pentagonal_j - pentagonal_i if is_pentagonal(_A ) and is_pentagonal(_A ): return b return -1 if __name__ == "__main__": print(f'{solution() = }')
5
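The is_pentagonal test above inverts P(n) = n(3n−1)/2: solving the quadratic 3n² − n − 2x = 0 gives n = (1 + sqrt(1 + 24x)) / 6, so x is pentagonal exactly when that expression is a positive integer. A quick sanity check on the first few pentagonal numbers (the full search, Project Euler 44, returns the difference of the qualifying pair):

def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0

pentagonals = [(i * (3 * i - 1)) // 2 for i in range(1, 10)]
print(pentagonals)               # [1, 5, 12, 22, 35, 51, 70, 92, 117]
assert all(is_pentagonal(p) for p in pentagonals)
assert not is_pentagonal(36)     # 35 and 51 are pentagonal, 36 is not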
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : int = logging.get_logger(__name__) A : Optional[int] = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "xmod" def __init__( self : int , __lowerCamelCase : Any=30522 , __lowerCamelCase : Any=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : str=2 , __lowerCamelCase : List[str]=0.0_2 , __lowerCamelCase : List[str]=1E-1_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str="absolute" , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Tuple=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str=("en_XX",) , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[int] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : List[Any] = num_attention_heads lowerCamelCase__ : Union[str, Any] = hidden_act lowerCamelCase__ : Optional[int] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : List[Any] = attention_probs_dropout_prob lowerCamelCase__ : Any = max_position_embeddings lowerCamelCase__ : List[Any] = type_vocab_size lowerCamelCase__ : int = initializer_range lowerCamelCase__ : Tuple = layer_norm_eps lowerCamelCase__ : Union[str, Any] = position_embedding_type lowerCamelCase__ : str = use_cache lowerCamelCase__ : Union[str, Any] = classifier_dropout lowerCamelCase__ : Any = pre_norm lowerCamelCase__ : Tuple = adapter_reduction_factor lowerCamelCase__ : Tuple = adapter_layer_norm lowerCamelCase__ : List[Any] = adapter_reuse_layer_norm lowerCamelCase__ : Dict = ln_before_adapter lowerCamelCase__ : List[Any] = list(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = default_language class _lowercase ( 
lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : Dict = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : List[str] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
5
1
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging A : Union[str, Any] = logging.get_logger(__name__) def lowercase_ ( _A : int ): """simple docstring""" lowerCamelCase__ : Tuple = r"\w+[.]\d+" lowerCamelCase__ : Optional[Any] = re.findall(_A , _A ) for pat in pats: lowerCamelCase__ : Tuple = key.replace(_A , "_".join(pat.split("." ) ) ) return key def lowercase_ ( _A : Any , _A : int , _A : List[str] ): """simple docstring""" lowerCamelCase__ : Tuple = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCamelCase__ : Tuple = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCamelCase__ : Optional[int] = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCamelCase__ : Dict = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase__ : Optional[int] = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCamelCase__ : List[str] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase__ : str = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": lowerCamelCase__ : Optional[int] = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase__ : Optional[Any] = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def lowercase_ ( _A : Any , _A : str , _A : str=42 ): """simple docstring""" lowerCamelCase__ : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCamelCase__ : List[Any] = flax_model.init_weights(PRNGKey(_A ) ) lowerCamelCase__ : int = flatten_dict(_A ) lowerCamelCase__ : Union[str, Any] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase__ : Any = rename_key(_A ) lowerCamelCase__ : List[str] = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters lowerCamelCase__ , lowerCamelCase__ : List[str] = rename_key_and_reshape_tensor(_A , _A , _A ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown lowerCamelCase__ : Optional[Any] = jnp.asarray(_A ) return unflatten_dict(_A )
5
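The first helper in the conversion script rewrites PyTorch-style indexed module names ("layers.0") into Flax-style ones ("layers_0") with the regex \w+[.]\d+. A standalone check of just that step, stdlib only, with illustrative key names:

import re

def rename_key(key: str) -> str:
    # Replace every "<name>.<index>" segment with "<name>_<index>".
    for pat in re.findall(r"\w+[.]\d+", key):
        key = key.replace(pat, "_".join(pat.split(".")))
    return key

assert rename_key("encoder.layers.0.attention.weight") == "encoder.layers_0.attention.weight"
assert rename_key("blocks.12.mlp.3.bias") == "blocks_12.mlp_3.bias"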
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _lowercase : """simple docstring""" def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=0.0_2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]=None , ): '''simple docstring''' lowerCamelCase__ : Tuple = parent lowerCamelCase__ : int = batch_size lowerCamelCase__ : List[Any] = seq_length lowerCamelCase__ : Union[str, Any] = is_training lowerCamelCase__ : Any = use_token_type_ids lowerCamelCase__ : Union[str, Any] = use_labels lowerCamelCase__ : List[str] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : List[Any] = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : str = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : List[str] = max_position_embeddings lowerCamelCase__ : Optional[int] = type_vocab_size lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : List[str] = initializer_range lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Optional[Any] = scope lowerCamelCase__ : List[Any] = self.vocab_size - 1 def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Any = None lowerCamelCase__ : str = None lowerCamelCase__ : str = None if self.use_labels: lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCamelCase__ : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, 
sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , *__lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = OpenAIGPTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : str = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTLMHeadModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTDoubleHeadsModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Optional[Any] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Dict = self.num_labels lowerCamelCase__ : Tuple = OpenAIGPTForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : str = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Any = config_and_inputs lowerCamelCase__ : Union[str, Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) A__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly A__ = ( { "feature-extraction": OpenAIGPTModel, 
"text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ): '''simple docstring''' lowerCamelCase__ : Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCamelCase__ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Tuple = inputs_dict["labels"] lowerCamelCase__ : Any = inputs_dict["labels"] lowerCamelCase__ : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 ) def lowerCAmelCase ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*__lowerCamelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Any = OpenAIGPTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(__lowerCamelCase ) lowerCamelCase__ : int = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__lowerCamelCase ) # the president is lowerCamelCase__ : Union[str, Any] = [ 481, 4735, 544, 246, 963, 870, 762, 
239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowerCamelCase__ : int = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase ) self.assertListEqual(output_ids[0].tolist() , __lowerCamelCase )
5
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A : Any = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : str = ["XLNetTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Union[str, Any] = ["XLNetTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = [ "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", "XLNetForMultipleChoice", "XLNetForQuestionAnswering", "XLNetForQuestionAnsweringSimple", "XLNetForSequenceClassification", "XLNetForTokenClassification", "XLNetLMHeadModel", "XLNetModel", "XLNetPreTrainedModel", "load_tf_weights_in_xlnet", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Dict = [ "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLNetForMultipleChoice", "TFXLNetForQuestionAnsweringSimple", "TFXLNetForSequenceClassification", "TFXLNetForTokenClassification", "TFXLNetLMHeadModel", "TFXLNetMainLayer", "TFXLNetModel", "TFXLNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys A : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : Dict = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class _lowercase ( lowercase__): """simple docstring""" A__ = "ibert" def __init__( self : int , __lowerCamelCase : List[str]=30522 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : str=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : Any=1E-1_2 , __lowerCamelCase : int=1 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : int=2 , __lowerCamelCase : int="absolute" , __lowerCamelCase : Tuple=False , __lowerCamelCase : Dict="none" , **__lowerCamelCase : Tuple , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Any = vocab_size lowerCamelCase__ : Optional[Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : int = num_attention_heads lowerCamelCase__ : List[str] = hidden_act lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : Any = type_vocab_size lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : Tuple = layer_norm_eps lowerCamelCase__ : int = position_embedding_type lowerCamelCase__ : List[str] = quant_mode lowerCamelCase__ : int = force_dequant class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : Any = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
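# ---------------------------------------------------------------------------
# Usage sketch for the config above: instantiating it with integer-only
# quantization enabled. `quant_mode` and `force_dequant` map to the attributes
# set in __init__; this assumes the class is exported as `IBertConfig`.
from transformers import IBertConfig

config = IBertConfig(quant_mode=True, force_dequant="none")
print(config.hidden_size, config.quant_mode)  # 768 True
# ---------------------------------------------------------------------------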
from __future__ import annotations def lowercase_ ( _A : str , _A : list[str] | None = None , _A : dict[str, float] | None = None , _A : bool = False , ): """simple docstring""" lowerCamelCase__ : Tuple = cipher_alphabet or [chr(_A ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) lowerCamelCase__ : Dict = { "a": 0.08_497, "b": 0.01_492, "c": 0.02_202, "d": 0.04_253, "e": 0.11_162, "f": 0.02_228, "g": 0.02_015, "h": 0.06_094, "i": 0.07_546, "j": 0.00_153, "k": 0.01_292, "l": 0.04_025, "m": 0.02_406, "n": 0.06_749, "o": 0.07_507, "p": 0.01_929, "q": 0.00_095, "r": 0.07_587, "s": 0.06_327, "t": 0.09_356, "u": 0.02_758, "v": 0.00_978, "w": 0.02_560, "x": 0.00_150, "y": 0.01_994, "z": 0.00_077, } else: # Custom frequencies dictionary lowerCamelCase__ : Optional[int] = frequencies_dict if not case_sensitive: lowerCamelCase__ : str = ciphertext.lower() # Chi squared statistic values lowerCamelCase__ : dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(_A ) ): lowerCamelCase__ : Optional[Any] = "" # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet lowerCamelCase__ : Dict = (alphabet_letters.index(letter.lower() ) - shift) % len( _A ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter lowerCamelCase__ : str = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: lowerCamelCase__ : List[str] = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message lowerCamelCase__ : List[str] = decrypted_with_shift.lower().count(_A ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowerCamelCase__ : List[Any] = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowerCamelCase__ : str = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message lowerCamelCase__ : Any = decrypted_with_shift.count(_A ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowerCamelCase__ : str = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowerCamelCase__ : int = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary lowerCamelCase__ : Optional[int] = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(_A : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] lowerCamelCase__ : int = min( _A , key=_A , ) # Get all the data from the most likely cipher (key, decoded message) ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : int = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, 
most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
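# ---------------------------------------------------------------------------
# Worked example of the chi-squared scoring used above. Each candidate shift
# is scored with chi^2 = sum((observed - expected)^2 / expected), where
# expected is the English letter frequency times the text length, and the
# smallest total wins. This is a simplified per-unique-letter sketch (the loop
# above adds the term once per occurrence instead); the rare-letter floor is
# an assumption to avoid division by zero:
freqs = {"e": 0.11162, "h": 0.06094, "l": 0.04025, "o": 0.07507,
         "i": 0.07546, "f": 0.02228, "m": 0.02406, "p": 0.01929}

def chi_squared(text: str) -> float:
    score = 0.0
    for letter in set(text):
        observed = text.count(letter)
        expected = freqs.get(letter, 0.0005) * len(text)
        score += (observed - expected) ** 2 / expected
    return score

# The true plaintext should score lower than a wrongly shifted candidate:
print(chi_squared("hello") < chi_squared("ifmmp"))  # True
# ---------------------------------------------------------------------------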
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Dict = logging.get_logger(__name__) A : Union[str, Any] = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "roberta" def __init__( self : int , __lowerCamelCase : Dict=50265 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : int=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : Optional[int]=1E-1_2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : int=0 , __lowerCamelCase : Any=2 , __lowerCamelCase : Tuple="absolute" , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=None , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : List[Any] = vocab_size lowerCamelCase__ : str = hidden_size lowerCamelCase__ : int = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : Tuple = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : int = type_vocab_size lowerCamelCase__ : Any = initializer_range lowerCamelCase__ : Dict = layer_norm_eps lowerCamelCase__ : int = position_embedding_type lowerCamelCase__ : Any = use_cache lowerCamelCase__ : int = classifier_dropout class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : int = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : Optional[Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
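# ---------------------------------------------------------------------------
# Sketch of the dynamic axes the OnnxConfig subclass above produces: the
# `inputs` property tells the ONNX exporter which tensor dimensions are
# variable-sized. Assumes the subclass is exported as `RobertaOnnxConfig`:
from transformers import RobertaConfig
from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig

onnx_config = RobertaOnnxConfig(RobertaConfig())
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])
# ---------------------------------------------------------------------------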
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A : List[str] = "pt" elif is_tf_available(): A : str = "tf" else: A : str = "jax" class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = PerceiverTokenizer A__ = False def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' super().setUp() lowerCamelCase__ : Tuple = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" ) def lowerCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : str ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict=False , __lowerCamelCase : int=20 , __lowerCamelCase : Optional[Any]=5 ): '''simple docstring''' lowerCamelCase__ : Tuple = [] for i in range(len(__lowerCamelCase ) ): try: lowerCamelCase__ : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCamelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) lowerCamelCase__ : Union[str, Any] = list(filter(lambda __lowerCamelCase : re.match(R"^[ a-zA-Z]+$" , t[1] ) , __lowerCamelCase ) ) lowerCamelCase__ : List[str] = list(filter(lambda __lowerCamelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowerCamelCase ) , __lowerCamelCase ) ) if max_length is not None and len(__lowerCamelCase ) > max_length: lowerCamelCase__ : Union[str, Any] = toks[:max_length] if min_length is not None and len(__lowerCamelCase ) < min_length and len(__lowerCamelCase ) > 0: while len(__lowerCamelCase ) < min_length: lowerCamelCase__ : Dict = toks + toks # toks_str = [t[1] for t in toks] lowerCamelCase__ : Tuple = [t[0] for t in toks] # Ensure consistency lowerCamelCase__ : Optional[int] = tokenizer.decode(__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase ) if " " not in output_txt and len(__lowerCamelCase ) > 1: lowerCamelCase__ : Any = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCamelCase ) + " " + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCamelCase ) ) if with_prefix_space: lowerCamelCase__ : Union[str, Any] = " " + output_txt lowerCamelCase__ : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) return output_txt, output_ids def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.perceiver_tokenizer lowerCamelCase__ : Optional[Any] = "Unicode €." 
lowerCamelCase__ : Dict = tokenizer(__lowerCamelCase ) lowerCamelCase__ : List[str] = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded["input_ids"] , __lowerCamelCase ) # decoding lowerCamelCase__ : Any = tokenizer.decode(__lowerCamelCase ) self.assertEqual(__lowerCamelCase , "[CLS]Unicode €.[SEP]" ) lowerCamelCase__ : int = tokenizer("e è é ê ë" ) lowerCamelCase__ : List[Any] = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded["input_ids"] , __lowerCamelCase ) # decoding lowerCamelCase__ : str = tokenizer.decode(__lowerCamelCase ) self.assertEqual(__lowerCamelCase , "[CLS]e è é ê ë[SEP]" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.perceiver_tokenizer lowerCamelCase__ : Optional[int] = ["A long paragraph for summarization.", "Another paragraph for summarization."] # fmt: off lowerCamelCase__ : Any = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on lowerCamelCase__ : str = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) if FRAMEWORK != "jax": lowerCamelCase__ : Union[str, Any] = list(batch.input_ids.numpy()[0] ) else: lowerCamelCase__ : int = list(batch.input_ids.tolist()[0] ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.perceiver_tokenizer lowerCamelCase__ : Optional[int] = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowerCamelCase__ : List[Any] = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids" , __lowerCamelCase ) self.assertIn("attention_mask" , __lowerCamelCase ) self.assertNotIn("decoder_input_ids" , __lowerCamelCase ) self.assertNotIn("decoder_attention_mask" , __lowerCamelCase ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Tuple = self.perceiver_tokenizer lowerCamelCase__ : Tuple = [ "Summary of the text.", "Another summary.", ] lowerCamelCase__ : List[Any] = tokenizer( text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors=__lowerCamelCase ) self.assertEqual(32 , targets["input_ids"].shape[1] ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test lowerCamelCase__ : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc lowerCamelCase__ : int = tempfile.mkdtemp() lowerCamelCase__ : List[Any] = " He is very happy, UNwant\u00E9d,running" lowerCamelCase__ : Any = tokenizer.encode(__lowerCamelCase , 
add_special_tokens=__lowerCamelCase ) tokenizer.save_pretrained(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = tokenizer.__class__.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : str = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) shutil.rmtree(__lowerCamelCase ) lowerCamelCase__ : int = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc lowerCamelCase__ : str = tempfile.mkdtemp() lowerCamelCase__ : Optional[Any] = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", "bambam"] ) lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token" ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) lowerCamelCase__ : Union[str, Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) tokenizer.save_pretrained(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = tokenizer.__class__.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) lowerCamelCase__ : Any = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__lowerCamelCase ) with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file: lowerCamelCase__ : Union[str, Any] = json.load(__lowerCamelCase ) with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file: lowerCamelCase__ : List[str] = json.load(__lowerCamelCase ) lowerCamelCase__ : int = [f"<extra_id_{i}>" for i in range(125 )] lowerCamelCase__ : List[Any] = added_tokens_extra_ids + [ "an_additional_special_token" ] lowerCamelCase__ : List[str] = added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(__lowerCamelCase , __lowerCamelCase ) with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(__lowerCamelCase , __lowerCamelCase ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowerCamelCase__ : List[Any] = tokenizer_class.from_pretrained( __lowerCamelCase , ) self.assertIn( "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowerCamelCase__ : int = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=__lowerCamelCase )] lowerCamelCase__ : Any = tokenizer_class.from_pretrained( __lowerCamelCase , additional_special_tokens=__lowerCamelCase , ) self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens ) self.assertEqual( ["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , ) def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , "�" ) def lowerCAmelCase ( self : str ): '''simple docstring''' pass def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' pass def lowerCAmelCase ( self : int ): '''simple docstring''' pass def lowerCAmelCase ( self : str ): '''simple docstring''' pass def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.get_tokenizers(fast=__lowerCamelCase , do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): lowerCamelCase__ : Union[str, Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"] lowerCamelCase__ : str = tokenizer.convert_tokens_to_string(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
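# ---------------------------------------------------------------------------
# Worked example behind the "Unicode €." expectation above: Perceiver
# tokenizes raw UTF-8 bytes, offsetting each byte by the number of special
# tokens and wrapping the sequence in [CLS]/[SEP]. The offset of 6 and the
# ids [CLS]=4, [SEP]=5 are inferred from the expected lists in the tests:
text = "Unicode €."
ids = [4] + [b + 6 for b in text.encode("utf-8")] + [5]
print(ids)  # [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
# ---------------------------------------------------------------------------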
import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features A : Union[str, Any] = logging.get_logger(__name__) A : Union[str, Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) A : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _lowercase : """simple docstring""" A__ = field( default=lowercase__ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase__)}) A__ = field( default=lowercase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}) A__ = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ = field( default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) A__ = field( default=64 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) A__ = field( default=30 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) } , ) A__ = field( default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"}) A__ = field( default=lowercase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}) A__ = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"}) class _lowercase ( lowercase__): """simple docstring""" A__ = "train" A__ = "dev" class _lowercase ( lowercase__): """simple docstring""" A__ = 42 A__ = 42 A__ = 42 A__ = 42 def __init__( self : Optional[int] , __lowerCamelCase : SquadDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "pt" , ): '''simple docstring''' lowerCamelCase__ : List[str] = args lowerCamelCase__ : Tuple = is_language_sensitive lowerCamelCase__ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__lowerCamelCase , __lowerCamelCase ): try: lowerCamelCase__ : List[str] = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) lowerCamelCase__ : str = mode # Load data features from cache or dataset file lowerCamelCase__ : Any = "v2" if args.version_2_with_negative else "v1" lowerCamelCase__ : List[str] = os.path.join( cache_dir if 
cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCamelCase__ : List[str] = cached_features_file + ".lock" with FileLock(__lowerCamelCase ): if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache: lowerCamelCase__ : str = time.time() lowerCamelCase__ : Tuple = torch.load(__lowerCamelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowerCamelCase__ : Optional[Any] = self.old_features["features"] lowerCamelCase__ : Optional[int] = self.old_features.get("dataset" , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = self.old_features.get("examples" , __lowerCamelCase ) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in" " future run" ) else: if mode == Split.dev: lowerCamelCase__ : List[Any] = self.processor.get_dev_examples(args.data_dir ) else: lowerCamelCase__ : str = self.processor.get_train_examples(args.data_dir ) lowerCamelCase__ , lowerCamelCase__ : Tuple = squad_convert_examples_to_features( examples=self.examples , tokenizer=__lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCamelCase , ) lowerCamelCase__ : int = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__( self : List[Any] ): '''simple docstring''' return len(self.features ) def __getitem__( self : List[str] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = self.features[i] lowerCamelCase__ : Tuple = torch.tensor(feature.input_ids , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long ) lowerCamelCase__ : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.cls_index , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.p_mask , dtype=torch.float ) lowerCamelCase__ : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float ) lowerCamelCase__ : List[str] = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask} ) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible} ) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowerCamelCase__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"start_positions": start_positions, "end_positions": end_positions} ) return inputs
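# ---------------------------------------------------------------------------
# Sketch of the lock-guarded caching pattern the dataset above uses: a
# FileLock ensures that in distributed training only one process builds the
# features while the others block and then read the cache. The path and the
# stand-in build step below are illustrative assumptions:
import os
import torch
from filelock import FileLock

cached_features_file = "/tmp/cached_features.pt"  # illustrative path
with FileLock(cached_features_file + ".lock"):
    if os.path.exists(cached_features_file):
        payload = torch.load(cached_features_file)
    else:
        payload = {"features": [], "dataset": None, "examples": None}  # stand-in build
        torch.save(payload, cached_features_file)
# ---------------------------------------------------------------------------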
from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar A : Dict = TypeVar("T") A : Any = TypeVar("U") class _lowercase ( Generic[T, U]): """simple docstring""" def __init__( self : Optional[int] , __lowerCamelCase : T | None , __lowerCamelCase : U | None ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = key lowerCamelCase__ : Tuple = val lowerCamelCase__ : DoubleLinkedListNode[T, U] | None = None lowerCamelCase__ : DoubleLinkedListNode[T, U] | None = None def __repr__( self : str ): '''simple docstring''' return ( f"Node: key: {self.key}, val: {self.val}, " f"has next: {bool(self.next )}, has prev: {bool(self.prev )}" ) class _lowercase ( Generic[T, U]): """simple docstring""" def __init__( self : Tuple ): '''simple docstring''' lowerCamelCase__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ : Dict = self.rear, self.head def __repr__( self : Dict ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = ["DoubleLinkedList"] lowerCamelCase__ : List[Any] = self.head while node.next is not None: rep.append(str(__lowerCamelCase ) ) lowerCamelCase__ : List[Any] = node.next rep.append(str(self.rear ) ) return ",\n ".join(__lowerCamelCase ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : DoubleLinkedListNode[T, U] ): '''simple docstring''' lowerCamelCase__ : str = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None lowerCamelCase__ : Tuple = node lowerCamelCase__ : List[Any] = previous lowerCamelCase__ : Any = node lowerCamelCase__ : Optional[int] = self.rear def lowerCAmelCase ( self : Tuple , __lowerCamelCase : DoubleLinkedListNode[T, U] ): '''simple docstring''' if node.prev is None or node.next is None: return None lowerCamelCase__ : Optional[Any] = node.next lowerCamelCase__ : List[str] = node.prev lowerCamelCase__ : Any = None lowerCamelCase__ : Optional[Any] = None return node class _lowercase ( Generic[T, U]): """simple docstring""" A__ = {} def __init__( self : List[Any] , __lowerCamelCase : int ): '''simple docstring''' lowerCamelCase__ : DoubleLinkedList[T, U] = DoubleLinkedList() lowerCamelCase__ : Optional[int] = capacity lowerCamelCase__ : Dict = 0 lowerCamelCase__ : List[Any] = 0 lowerCamelCase__ : Tuple = 0 lowerCamelCase__ : dict[T, DoubleLinkedListNode[T, U]] = {} def __repr__( self : Union[str, Any] ): '''simple docstring''' return ( f"CacheInfo(hits={self.hits}, misses={self.miss}, " f"capacity={self.capacity}, current size={self.num_keys})" ) def __contains__( self : Tuple , __lowerCamelCase : T ): '''simple docstring''' return key in self.cache def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : T ): '''simple docstring''' if key in self.cache: self.hits += 1 lowerCamelCase__ : DoubleLinkedListNode[T, U] = self.cache[key] lowerCamelCase__ : Optional[Any] = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(__lowerCamelCase ) return node.val self.miss += 1 return None def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : T , __lowerCamelCase : U ): '''simple docstring''' if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity lowerCamelCase__ : Tuple = self.list.head.next # 
guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(__lowerCamelCase ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 lowerCamelCase__ : Optional[int] = DoubleLinkedListNode(__lowerCamelCase , __lowerCamelCase ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value lowerCamelCase__ : List[str] = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list lowerCamelCase__ : Any = value self.list.add(__lowerCamelCase ) @classmethod def lowerCAmelCase ( cls : List[Any] , __lowerCamelCase : int = 128 ): '''simple docstring''' def cache_decorator_inner(__lowerCamelCase : Callable[[T], U] ) -> Callable[..., U]: def cache_decorator_wrapper(*__lowerCamelCase : T ) -> U: if func not in cls.decorator_function_to_instance_map: lowerCamelCase__ : int = LRUCache(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: lowerCamelCase__ : Optional[int] = func(*__lowerCamelCase ) cls.decorator_function_to_instance_map[func].put(args[0] , __lowerCamelCase ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(__lowerCamelCase , "cache_info" , __lowerCamelCase ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
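# ---------------------------------------------------------------------------
# Readable sketch of the LRU semantics above: a dict gives O(1) lookup and a
# doubly linked list tracks recency, evicting the node after the head (the
# oldest) when over capacity. The same behaviour in a few lines with an
# OrderedDict (illustrative names, not the implementation above):
from collections import OrderedDict

class TinyLRU:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.data: OrderedDict = OrderedDict()

    def get(self, key):
        if key not in self.data:
            return None
        self.data.move_to_end(key)  # mark as most recently used
        return self.data[key]

    def put(self, key, val) -> None:
        if key in self.data:
            self.data.move_to_end(key)
        elif len(self.data) >= self.capacity:
            self.data.popitem(last=False)  # evict the least recently used
        self.data[key] = val

cache = TinyLRU(2)
cache.put("a", 1); cache.put("b", 2); cache.get("a"); cache.put("c", 3)
print(list(cache.data))  # ['a', 'c'] ('b' was evicted)
# ---------------------------------------------------------------------------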
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging A : Tuple = logging.get_logger(__name__) A : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all LED models at https://huggingface.co/models?filter=LED A : int = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } A : Union[str, Any] = { "allenai/led-base-16384": 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : str = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) lowerCamelCase__ : Any = bs[:] lowerCamelCase__ : Union[str, Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(_A ) cs.append(2**8 + n ) n += 1 lowerCamelCase__ : Any = [chr(_A ) for n in cs] return dict(zip(_A , _A ) ) def lowercase_ ( _A : Any ): """simple docstring""" lowerCamelCase__ : Union[str, Any] = set() lowerCamelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase__ : Any = char return pairs class _lowercase ( lowercase__): """simple docstring""" A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ["input_ids", "attention_mask"] def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token lowerCamelCase__ : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token lowerCamelCase__ : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token lowerCamelCase__ : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token lowerCamelCase__ : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: lowerCamelCase__ : Union[str, Any] = json.load(__lowerCamelCase ) lowerCamelCase__ : List[str] = {v: k for k, v in self.encoder.items()} lowerCamelCase__ : Union[str, Any] = errors # how to handle errors in decoding lowerCamelCase__ : List[Any] = bytes_to_unicode() lowerCamelCase__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: lowerCamelCase__ : List[Any] = merges_handle.read().split("\n" )[1:-1] lowerCamelCase__ : str = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase__ : Optional[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) lowerCamelCase__ : List[Any] = {} lowerCamelCase__ : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase__ : List[str] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return len(self.encoder ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Dict ): '''simple docstring''' if token in self.cache: return self.cache[token] lowerCamelCase__ : Union[str, Any] = tuple(__lowerCamelCase ) lowerCamelCase__ : Tuple = get_pairs(__lowerCamelCase ) if not pairs: return token while True: lowerCamelCase__ : str = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = bigram lowerCamelCase__ : int = [] lowerCamelCase__ : int = 0 while i < len(__lowerCamelCase ): try: lowerCamelCase__ : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase__ : List[str] = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase__ : Dict = tuple(__lowerCamelCase ) lowerCamelCase__ : str = new_word if len(__lowerCamelCase ) == 1: break else: lowerCamelCase__ : List[str] = get_pairs(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = " ".join(__lowerCamelCase ) lowerCamelCase__ : Dict = word return word def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = [] for token in re.findall(self.pat , __lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in 
self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def lowerCAmelCase ( self : Any , __lowerCamelCase : int ): '''simple docstring''' return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' return self.decoder.get(__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = "".join(__lowerCamelCase ) lowerCamelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCamelCase__ : List[Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase__ : Union[str, Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) lowerCamelCase__ : Tuple = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" 
) lowerCamelCase__ : List[Any] = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ : List[str] = [self.cls_token_id] lowerCamelCase__ : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowerCamelCase__ : Any = [self.sep_token_id] lowerCamelCase__ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict=False , **__lowerCamelCase : List[str] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): lowerCamelCase__ : Dict = " " + text return (text, kwargs) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ): '''simple docstring''' lowerCamelCase__ : str = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: lowerCamelCase__ : str = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCamelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCamelCase__ : Union[str, Any] = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: lowerCamelCase__ : Dict = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCamelCase__ : Optional[int] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowerCamelCase__ : Union[str, Any] = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
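# ---------------------------------------------------------------------------
# Worked example of the pair extraction driving the BPE loop above: the
# helper defined near the top of the module collects every adjacent symbol
# pair of a word, and the merge loop repeatedly fuses the lowest-ranked pair.
# A self-contained sketch of that helper under a readable name:
def get_pairs(word: tuple) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

print(sorted(get_pairs(tuple("hello"))))
# [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]
# ---------------------------------------------------------------------------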
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class _lowercase ( unittest.TestCase): """simple docstring""" def __init__( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : int = 32 , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 255 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __lowerCamelCase : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __lowerCamelCase : bool = True , __lowerCamelCase : str=7 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : str=400 , __lowerCamelCase : Optional[int]=3 , ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = parent lowerCamelCase__ : Optional[int] = do_resize lowerCamelCase__ : Tuple = size if size is not None else {"shortest_edge": 288} lowerCamelCase__ : Optional[Any] = size_divisor lowerCamelCase__ : int = do_rescale lowerCamelCase__ : List[Any] = rescale_factor lowerCamelCase__ : Union[str, Any] = do_normalize lowerCamelCase__ : Any = do_center_crop lowerCamelCase__ : Dict = image_mean lowerCamelCase__ : Optional[int] = image_std lowerCamelCase__ : Dict = do_pad lowerCamelCase__ : List[Any] = batch_size lowerCamelCase__ : Optional[Any] = num_channels lowerCamelCase__ : int = min_resolution lowerCamelCase__ : List[str] = max_resolution def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str=False ): '''simple docstring''' if not batched: lowerCamelCase__ : str = self.size["shortest_edge"] lowerCamelCase__ : List[str] = image_inputs[0] if isinstance(__lowerCamelCase , Image.Image ): lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = image.size else: lowerCamelCase__ , lowerCamelCase__ : Any = image.shape[1], image.shape[2] lowerCamelCase__ : Union[str, Any] = size / min(__lowerCamelCase , __lowerCamelCase ) if h < w: lowerCamelCase__ , lowerCamelCase__ : Optional[int] = size, scale * w else: lowerCamelCase__ , lowerCamelCase__ : List[Any] = scale * h, size lowerCamelCase__ : Any = int((1333 / 800) * size ) if max(__lowerCamelCase , __lowerCamelCase ) > max_size: lowerCamelCase__ : Optional[int] = max_size / max(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : List[str] = newh * scale lowerCamelCase__ : Optional[Any] = neww * scale lowerCamelCase__ , lowerCamelCase__ : List[str] = int(newh + 0.5 ), int(neww + 0.5 ) lowerCamelCase__ , lowerCamelCase__ : str = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: lowerCamelCase__ : Optional[int] = [] for image in image_inputs: lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.get_expected_values([image] ) 
expected_values.append((expected_height, expected_width) ) lowerCamelCase__ : List[Any] = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0] lowerCamelCase__ : str = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = BridgeTowerImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : int = BridgeTowerImageProcessingTester(self ) @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size" ) ) self.assertTrue(hasattr(__lowerCamelCase , "size_divisor" ) ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' pass def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input lowerCamelCase__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase__ : Union[str, Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : Tuple = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase__ : Optional[Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : Any = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) 
self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input lowerCamelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : Any = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCamelCase__ : Any = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values lowerCamelCase__ , lowerCamelCase__ : List[str] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
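# ---------------------------------------------------------------------------
# Worked example of the expected-size arithmetic in the tester above: resize
# so the shorter side equals `shortest_edge`, cap the longer side at
# int(1333 / 800 * size), then floor both sides to multiples of
# `size_divisor`. For a 400x600 image with shortest_edge=288, size_divisor=32:
size, size_divisor = 288, 32
h, w = 400, 600
scale = size / min(h, w)                                         # 0.72
newh, neww = (size, scale * w) if h < w else (scale * h, size)   # (288, 432.0)
max_size = int((1333 / 800) * size)                              # 479
if max(newh, neww) > max_size:  # 432 <= 479, so no extra shrink here
    rescale = max_size / max(newh, neww)
    newh, neww = newh * rescale, neww * rescale
newh, neww = int(newh + 0.5), int(neww + 0.5)
print(newh // size_divisor * size_divisor, neww // size_divisor * size_divisor)  # 288 416
# ---------------------------------------------------------------------------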
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = KandinskyVaaImgaImgPipeline A__ = ["image_embeds", "negative_image_embeds", "image"] A__ = [ "image_embeds", "negative_image_embeds", "image", ] A__ = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] A__ = False @property def lowerCAmelCase ( self : Dict ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return 100 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : Optional[Any] = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } lowerCamelCase__ : Tuple = UNetaDConditionModel(**__lowerCamelCase ) return model @property def lowerCAmelCase ( self : int ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : int = VQModel(**self.dummy_movq_kwargs ) return model def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.dummy_unet lowerCamelCase__ : Optional[Any] = self.dummy_movq lowerCamelCase__ : Optional[int] = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_0_0_8_5, "beta_end": 0.0_1_2, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } lowerCamelCase__ : List[Any] = DDIMScheduler(**__lowerCamelCase ) lowerCamelCase__ : Tuple = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , 
__lowerCamelCase : int=0 ): '''simple docstring''' lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowerCamelCase ) # create init_image lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ : Optional[int] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) ) if str(__lowerCamelCase ).startswith("mps" ): lowerCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase ) else: lowerCamelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) lowerCamelCase__ : Tuple = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Dict = "cpu" lowerCamelCase__ : str = self.get_dummy_components() lowerCamelCase__ : Optional[int] = self.pipeline_class(**__lowerCamelCase ) lowerCamelCase__ : List[str] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) ) lowerCamelCase__ : List[str] = output.images lowerCamelCase__ : Optional[Any] = pipe( **self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0] lowerCamelCase__ : int = image[0, -3:, -3:, -1] lowerCamelCase__ : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase__ : str = np.array( [0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy" ) lowerCamelCase__ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) lowerCamelCase__ : Any = "A red cartoon frog, 4k" lowerCamelCase__ : str = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(__lowerCamelCase ) lowerCamelCase__ : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa ) lowerCamelCase__ : str = pipeline.to(__lowerCamelCase ) pipeline.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Tuple = 
torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase__ , lowerCamelCase__ : List[str] = pipe_prior( __lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() lowerCamelCase__ : Optional[Any] = pipeline( image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , ) lowerCamelCase__ : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
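# Note on the two-stage pattern exercised above: Kandinsky 2.2 img2img splits
# inference into a prior pipeline (text prompt -> image_embeds and
# negative_image_embeds, unpacked via .to_tuple()) and a decoder pipeline
# (init image + embeddings -> final image). A minimal sketch of that flow,
# using the class and checkpoint names as they appear in the slow test above:
#
#   prior = KandinskyVaaPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#   image_embeds, negative_image_embeds = prior("A red cartoon frog, 4k").to_tuple()
#   decoder = KandinskyVaaImgaImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
#   out = decoder(image=init_image, image_embeds=image_embeds,
#                 negative_image_embeds=negative_image_embeds, strength=0.2)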
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = "ssube/stable-diffusion-x4-upscaler-onnx" def lowerCAmelCase ( self : List[str] , __lowerCamelCase : Dict=0 ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(__lowerCamelCase ) ) lowerCamelCase__ : str = torch.manual_seed(__lowerCamelCase ) lowerCamelCase__ : str = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : List[Any] = self.get_dummy_inputs() lowerCamelCase__ : List[Any] = pipe(**__lowerCamelCase ).images lowerCamelCase__ : Any = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) lowerCamelCase__ : str = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCamelCase__ : Dict = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : int = self.get_dummy_inputs() lowerCamelCase__ : Tuple = pipe(**__lowerCamelCase ).images lowerCamelCase__ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCamelCase__ : Dict = np.array( [0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCamelCase__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Tuple = self.get_dummy_inputs() lowerCamelCase__ : Dict = pipe(**__lowerCamelCase ).images lowerCamelCase__ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCamelCase__ : List[Any] = np.array( [0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def 
lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCamelCase__ : Optional[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Any = self.get_dummy_inputs() lowerCamelCase__ : Tuple = pipe(**__lowerCamelCase ).images lowerCamelCase__ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCamelCase__ : Optional[Any] = np.array( [0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCamelCase__ : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : str = self.get_dummy_inputs() lowerCamelCase__ : Dict = pipe(**__lowerCamelCase ).images lowerCamelCase__ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCamelCase__ : Optional[int] = np.array( [0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class _lowercase ( unittest.TestCase): """simple docstring""" @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Any = ort.SessionOptions() lowerCamelCase__ : List[Any] = False return options def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) lowerCamelCase__ : List[str] = init_image.resize((128, 128) ) # using the PNDM scheduler by default lowerCamelCase__ : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : List[str] = "A fantasy landscape, trending on artstation" lowerCamelCase__ : Any = torch.manual_seed(0 ) lowerCamelCase__ : Union[str, Any] = pipe( prompt=__lowerCamelCase , image=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__lowerCamelCase , output_type="np" , ) lowerCamelCase__ : List[Any] = output.images lowerCamelCase__ : Optional[int] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCamelCase__ : Optional[int] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ 
: List[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) lowerCamelCase__ : Tuple = init_image.resize((128, 128) ) lowerCamelCase__ : str = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" ) lowerCamelCase__ : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=__lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = "A fantasy landscape, trending on artstation" lowerCamelCase__ : Union[str, Any] = torch.manual_seed(0 ) lowerCamelCase__ : List[str] = pipe( prompt=__lowerCamelCase , image=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__lowerCamelCase , output_type="np" , ) lowerCamelCase__ : Optional[Any] = output.images lowerCamelCase__ : Optional[int] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCamelCase__ : str = np.array( [0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
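# The scheduler-swap pattern used throughout these upscaler tests: any
# diffusers scheduler can be rebuilt from another scheduler's config, so a
# pipeline can switch samplers without reloading weights. A minimal sketch
# (checkpoint and provider taken from the tests above):
#
#   pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#       "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
#   )
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)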
def binary_xor(a: int, b: int) -> str:
    """Return the XOR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
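# Worked example for binary_xor (the name assumed in the rewrite above); the
# result matches Python's built-in integer XOR:
#
#   >>> binary_xor(25, 32)   # 0b011001 ^ 0b100000
#   '0b111001'
#   >>> bin(25 ^ 32)
#   '0b111001'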
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    """Configuration class for a RoBERTa model (BERT-style hyperparameters)."""

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
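# Minimal usage sketch for the config classes restored above (hypothetical
# values; any RoBERTa-style hyperparameters work the same way):
#
#   config = RobertaConfig(vocab_size=50265, hidden_size=768, num_hidden_layers=12)
#   onnx_config = RobertaOnnxConfig(config)
#   list(onnx_config.inputs)  # -> ['input_ids', 'attention_mask']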
import os
from pathlib import Path


def load_cuda_kernels():
    """JIT-compile and import the multi-scale deformable attention CUDA kernels."""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
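# JIT compilation via torch.utils.cpp_extension.load needs a working CUDA
# toolchain, so callers typically guard it. A hedged usage sketch (the
# fallback branch is hypothetical, not part of this file):
#
#   try:
#       MSDA = load_cuda_kernels()
#   except Exception:
#       MSDA = None  # fall back to a pure-PyTorch attention implementation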
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    """Configuration class for a Swin2SR super-resolution model."""

    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
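# Usage sketch for the restored Swin2SRConfig (values are the defaults above;
# note attribute_map, which lets generic code read `hidden_size` while the
# stored field is `embed_dim`):
#
#   config = Swin2SRConfig(upscale=4, upsampler="pixelshuffle")
#   config.hidden_size == config.embed_dim  # True, via attribute_map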
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
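# Timeline implemented by the three branches above, for reference:
#   day 0      issue opened
#   day >= 30  issue becomes eligible for staleness checks at all
#   +23 days   of inactivity: the bot comments and adds the "stale" label
#   +7 days    more inactivity after the bot comment: the issue is closed
#   any human comment on a stale issue reopens it and removes the label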
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
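# The _LazyModule pattern above defers heavy imports: at import time the module
# only records which names live in which submodule, and the first attribute
# access triggers the real import. Sketch of the effect:
#
#   from transformers.models.gpt_neo import GPTNeoConfig  # cheap, config only
#   from transformers.models.gpt_neo import GPTNeoModel   # pulls in the torch code on first access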
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497, "b": 0.01492, "c": 0.02202, "d": 0.04253,
            "e": 0.11162, "f": 0.02228, "g": 0.02015, "h": 0.06094,
            "i": 0.07546, "j": 0.00153, "k": 0.01292, "l": 0.04025,
            "m": 0.02406, "n": 0.06749, "o": 0.07507, "p": 0.01929,
            "q": 0.00095, "r": 0.07587, "s": 0.06327, "t": 0.09356,
            "u": 0.02758, "v": 0.00978, "w": 0.02560, "x": 0.00150,
            "y": 0.01994, "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
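# Usage sketch for decrypt_caesar_with_chi_squared (the name assumed in the
# rewrite above). For reasonably long English ciphertext the lowest chi-squared
# score usually identifies the true shift; very short strings can mislead it:
#
#   shift, score, plaintext = decrypt_caesar_with_chi_squared("wklv lv d whvw phvvdjh")
#   # expected to recover shift 3 and "this is a test message"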
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() A : int = logging.get_logger(__name__) A : Dict = ["model.decoder.embed_positions.weights"] def lowercase_ ( _A : Optional[Any] ): """simple docstring""" if "emb" in name: lowerCamelCase__ : Tuple = name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: lowerCamelCase__ : Dict = name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: lowerCamelCase__ : List[str] = name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: lowerCamelCase__ : List[Any] = name.replace("linear1" , "fc1" ) if "linear2" in name: lowerCamelCase__ : List[str] = name.replace("linear2" , "fc2" ) if "norm1" in name: lowerCamelCase__ : Optional[Any] = name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: lowerCamelCase__ : Optional[int] = name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: lowerCamelCase__ : Tuple = name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: lowerCamelCase__ : Tuple = name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: lowerCamelCase__ : str = name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: lowerCamelCase__ : List[str] = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def lowercase_ ( _A : OrderedDict , _A : int ): """simple docstring""" lowerCamelCase__ : int = list(state_dict.keys() ) lowerCamelCase__ : Optional[Any] = {} for key in keys: lowerCamelCase__ : Optional[Any] = state_dict.pop(_A ) lowerCamelCase__ : Any = rename_keys(_A ) if "in_proj_weight" in key: # split fused qkv proj lowerCamelCase__ : List[str] = val[:hidden_size, :] lowerCamelCase__ : str = val[hidden_size : 2 * hidden_size, :] lowerCamelCase__ : List[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: lowerCamelCase__ : str = val else: lowerCamelCase__ : Dict = val return state_dict, enc_dec_proj_state_dict def lowercase_ ( _A : str ): """simple docstring""" if checkpoint == "small": # default config values lowerCamelCase__ : Union[str, Any] = 1024 lowerCamelCase__ : List[Any] = 24 lowerCamelCase__ : int = 16 elif checkpoint == "medium": lowerCamelCase__ : Tuple = 1536 lowerCamelCase__ : Union[str, Any] = 48 lowerCamelCase__ : Optional[Any] = 24 elif checkpoint == "large": lowerCamelCase__ : Any = 2048 lowerCamelCase__ : Optional[Any] = 48 lowerCamelCase__ : Tuple = 32 else: raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." 
) lowerCamelCase__ : Tuple = MusicgenDecoderConfig( hidden_size=_A , ffn_dim=hidden_size * 4 , num_hidden_layers=_A , num_attention_heads=_A , ) return config @torch.no_grad() def lowercase_ ( _A : List[str] , _A : Any=None , _A : Dict=None , _A : str="cpu" ): """simple docstring""" lowerCamelCase__ : Dict = MusicGen.get_pretrained(_A , device=_A ) lowerCamelCase__ : Optional[Any] = decoder_config_from_checkpoint(_A ) lowerCamelCase__ : str = fairseq_model.lm.state_dict() lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = rename_state_dict( _A , hidden_size=decoder_config.hidden_size ) lowerCamelCase__ : Union[str, Any] = TaEncoderModel.from_pretrained("t5-base" ) lowerCamelCase__ : Dict = EncodecModel.from_pretrained("facebook/encodec_32khz" ) lowerCamelCase__ : List[Any] = MusicgenForCausalLM(_A ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection lowerCamelCase__ , lowerCamelCase__ : Optional[int] = decoder.load_state_dict(_A , strict=_A ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(_A ) if len(_A ) > 0: raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" ) if len(_A ) > 0: raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" ) # init the composite model lowerCamelCase__ : Optional[int] = MusicgenForConditionalGeneration(text_encoder=_A , audio_encoder=_A , decoder=_A ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(_A ) # check we can do a forward pass lowerCamelCase__ : Optional[int] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) lowerCamelCase__ : Any = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): lowerCamelCase__ : List[Any] = model(input_ids=_A , decoder_input_ids=_A ).logits if logits.shape != (8, 1, 2048): raise ValueError("Incorrect shape for logits" ) # now construct the processor lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("t5-base" ) lowerCamelCase__ : Dict = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) lowerCamelCase__ : List[str] = MusicgenProcessor(feature_extractor=_A , tokenizer=_A ) # set the appropriate bos/pad token ids lowerCamelCase__ : Union[str, Any] = 2048 lowerCamelCase__ : Dict = 2048 # set other default generation config params lowerCamelCase__ : Optional[Any] = int(30 * audio_encoder.config.frame_rate ) lowerCamelCase__ : Dict = True lowerCamelCase__ : int = 3.0 if pytorch_dump_folder is not None: Path(_A ).mkdir(exist_ok=_A ) logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" ) model.save_pretrained(_A ) processor.save_pretrained(_A ) if repo_id: logger.info(F"Pushing model {checkpoint} to {repo_id}" ) model.push_to_hub(_A ) processor.push_to_hub(_A ) if __name__ == "__main__": A : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." 
) A : Optional[Any] = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
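# Example invocation of the conversion script above (script filename and
# output path are hypothetical; the flags are the ones declared in argparse):
#
#   python convert_musicgen.py --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small --device cpu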
def is_automorphic_number(number: int) -> bool:
    """Check whether the square of ``number`` ends in ``number`` itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
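# Worked examples for is_automorphic_number (the name assumed in the rewrite
# above): a number is automorphic when its square ends in the number itself.
#
#   >>> is_automorphic_number(5)    # 5**2 = 25
#   True
#   >>> is_automorphic_number(76)   # 76**2 = 5776
#   True
#   >>> is_automorphic_number(7)    # 7**2 = 49
#   False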
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : int = logging.get_logger(__name__) A : Optional[int] = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "xmod" def __init__( self : int , __lowerCamelCase : Any=30522 , __lowerCamelCase : Any=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : str=2 , __lowerCamelCase : List[str]=0.0_2 , __lowerCamelCase : List[str]=1E-1_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str="absolute" , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Tuple=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str=("en_XX",) , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[int] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : List[Any] = num_attention_heads lowerCamelCase__ : Union[str, Any] = hidden_act lowerCamelCase__ : Optional[int] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : List[Any] = attention_probs_dropout_prob lowerCamelCase__ : Any = max_position_embeddings lowerCamelCase__ : List[Any] = type_vocab_size lowerCamelCase__ : int = initializer_range lowerCamelCase__ : Tuple = layer_norm_eps lowerCamelCase__ : Union[str, Any] = position_embedding_type lowerCamelCase__ : str = use_cache lowerCamelCase__ : Union[str, Any] = classifier_dropout lowerCamelCase__ : Any = pre_norm lowerCamelCase__ : Tuple = adapter_reduction_factor lowerCamelCase__ : Tuple = adapter_layer_norm lowerCamelCase__ : List[Any] = adapter_reuse_layer_norm lowerCamelCase__ : Dict = ln_before_adapter lowerCamelCase__ : List[Any] = list(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = default_language class _lowercase ( 
lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : Dict = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : List[str] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
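# Same lazy-import layout as the GPT-Neo __init__ earlier in this dump, with
# two optional-dependency gates: the tokenizer is exposed only when
# sentencepiece is installed, and the model classes only when torch is.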
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging A : List[str] = logging.get_logger(__name__) def lowercase_ ( _A : Union[tf.Tensor, np.ndarray] ): """simple docstring""" if isinstance(_A , np.ndarray ): return list(tensor.shape ) lowerCamelCase__ : Union[str, Any] = tf.shape(_A ) if tensor.shape == tf.TensorShape(_A ): return dynamic lowerCamelCase__ : Optional[int] = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(_A )] def lowercase_ ( _A : tf.Tensor , _A : Optional[int] = None , _A : Optional[str] = None ): """simple docstring""" return tf.nn.softmax(logits=logits + 1E-9 , axis=_A , name=_A ) def lowercase_ ( _A : Tuple , _A : Dict , _A : Any , _A : str=1E-5 , _A : Optional[Any]=-1 ): """simple docstring""" if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_A , _A ): raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." ) # Get mean and variance on the axis to be normalized lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = tf.nn.moments(_A , axes=[axis] , keepdims=_A ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowerCamelCase__ : List[Any] = [1] * inputs.shape.rank lowerCamelCase__ : Union[str, Any] = shape_list(_A )[axis] lowerCamelCase__ : Tuple = tf.reshape(_A , _A ) lowerCamelCase__ : List[Any] = tf.reshape(_A , _A ) # Compute layer normalization using the batch_normalization # function. lowerCamelCase__ : str = tf.nn.batch_normalization( _A , _A , _A , offset=_A , scale=_A , variance_epsilon=_A , ) return outputs def lowercase_ ( _A : int , _A : Optional[Any]=0 , _A : Optional[Any]=-1 ): """simple docstring""" if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowerCamelCase__ : Dict = tf.shape(_A ) lowerCamelCase__ : List[str] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowerCamelCase__ : Optional[int] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(_A , _A ) def lowercase_ ( _A : tf.Tensor ): """simple docstring""" if not isinstance(_A , tf.Tensor ): lowerCamelCase__ : str = tf.convert_to_tensor(_A ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowerCamelCase__ : Union[str, Any] = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowerCamelCase__ : Optional[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowerCamelCase__ : Any = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowercase_ ( _A : tf.Tensor , _A : int , _A : str = "input_ids" ): """simple docstring""" tf.debugging.assert_less( _A , tf.cast(_A , dtype=tensor.dtype ) , message=( F"The maximum value of {tensor_name} ({tf.math.reduce_max(_A )}) must be smaller than the embedding " F"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time." 
) , ) def lowercase_ ( _A : int , _A : List[Any] , _A : List[str] ): """simple docstring""" lowerCamelCase__ : Tuple = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowerCamelCase__ : Union[str, Any] = [x for x in data if len(_A ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( "The following attributes cannot be saved to HDF5 file because " F"they are larger than {HDF5_OBJECT_HEADER_LIMIT} " F"bytes: {bad_attributes}" ) lowerCamelCase__ : List[Any] = np.asarray(_A ) lowerCamelCase__ : Any = 1 lowerCamelCase__ : Dict = np.array_split(_A , _A ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowerCamelCase__ : Optional[Any] = np.array_split(_A , _A ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(_A ): lowerCamelCase__ : str = chunk_data else: lowerCamelCase__ : int = data def lowercase_ ( _A : str , _A : Any ): """simple docstring""" if name in group.attrs: lowerCamelCase__ : str = [n.decode("utf8" ) if hasattr(_A , "decode" ) else n for n in group.attrs[name]] else: lowerCamelCase__ : List[Any] = [] lowerCamelCase__ : List[str] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("utf8" ) if hasattr(_A , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] ) chunk_id += 1 return data def lowercase_ ( _A : Dict ): """simple docstring""" def _expand_single_ad_tensor(_A : Dict ): if isinstance(_A , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(_A , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , _A )
from __future__ import annotations import time import numpy as np A : Dict = [8, 5, 9, 7] A : Optional[Any] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] A : Any = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class _lowercase : """simple docstring""" def __init__( self : str , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ): '''simple docstring''' lowerCamelCase__ : int = claim_vector lowerCamelCase__ : str = allocated_resources_table lowerCamelCase__ : int = maximum_claim_table def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return {self.__need().index(__lowerCamelCase ): i for i in self.__need()} def lowerCAmelCase ( self : List[str] , **__lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.__need() lowerCamelCase__ : str = self.__allocated_resources_table lowerCamelCase__ : List[Any] = self.__available_resources() lowerCamelCase__ : str = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n" ) while need_list: lowerCamelCase__ : int = False for each_need in need_list: lowerCamelCase__ : Dict = True for index, need in enumerate(__lowerCamelCase ): if need > available_resources[index]: lowerCamelCase__ : str = False break if execution: lowerCamelCase__ : Tuple = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: lowerCamelCase__ : Any = original_need_index print(f"Process {process_number + 1} is executing." ) # remove the process run from stack need_list.remove(__lowerCamelCase ) # update available/freed resources stack lowerCamelCase__ : Union[str, Any] = np.array(__lowerCamelCase ) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(__lowerCamelCase ) for x in available_resources] ) ) break if safe: print("The process is in a safe state.\n" ) else: print("System in unsafe state. Aborting...\n" ) break def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' print(" " * 9 + "Allocated Resource Table" ) for item in self.__allocated_resources_table: print( f"P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print(" " * 9 + "System Resource Table" ) for item in self.__maximum_claim_table: print( f"P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) ) print( "Initial Available Resources: " + " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[str] = 1 lowerCamelCase__ : Any = 3 lowerCamelCase__ : Tuple = (32, 32) lowerCamelCase__ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCamelCase ) return image @property def lowerCAmelCase ( self : Dict ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) return model @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(__lowerCamelCase ) @property def lowerCAmelCase ( self : Any ): '''simple docstring''' def extract(*__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : int ): class _lowercase : """simple docstring""" def __init__( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = torch.ones([0] ) def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Tuple ): '''simple docstring''' self.pixel_values.to(__lowerCamelCase ) return self return Out() return extract def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCamelCase__ : Union[str, Any] = self.dummy_cond_unet lowerCamelCase__ : List[str] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , ) lowerCamelCase__ : Dict = self.dummy_vae lowerCamelCase__ : Any = self.dummy_text_encoder lowerCamelCase__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk lowerCamelCase__ : str = StableDiffusionPipeline( unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , ) lowerCamelCase__ : Dict = 
sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = "A painting of a squirrel eating a burger" lowerCamelCase__ : Tuple = torch.Generator(device=__lowerCamelCase ).manual_seed(0 ) lowerCamelCase__ : Optional[Any] = sd_pipe([prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" ) lowerCamelCase__ : List[str] = output.images lowerCamelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 ) lowerCamelCase__ : Optional[int] = sd_pipe( [prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=__lowerCamelCase , )[0] lowerCamelCase__ : int = image[0, -3:, -3:, -1] lowerCamelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase__ : Optional[int] = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCamelCase__ : List[Any] = self.dummy_cond_unet lowerCamelCase__ : Tuple = PNDMScheduler(skip_prk_steps=__lowerCamelCase ) lowerCamelCase__ : Dict = self.dummy_vae lowerCamelCase__ : Any = self.dummy_text_encoder lowerCamelCase__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk lowerCamelCase__ : str = StableDiffusionPipeline( unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , ) lowerCamelCase__ : Optional[int] = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Dict = "A painting of a squirrel eating a burger" lowerCamelCase__ : Union[str, Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 ) lowerCamelCase__ : Any = sd_pipe([prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" ) lowerCamelCase__ : int = output.images lowerCamelCase__ : Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 ) lowerCamelCase__ : List[str] = sd_pipe( [prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=__lowerCamelCase , )[0] lowerCamelCase__ : int = image[0, -3:, -3:, -1] lowerCamelCase__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase__ : List[Any] = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=__lowerCamelCase ) assert isinstance(__lowerCamelCase , __lowerCamelCase ) assert isinstance(pipe.scheduler , __lowerCamelCase ) assert pipe.safety_checker is None lowerCamelCase__ : int = pipe("example prompt" , 
num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__lowerCamelCase ) lowerCamelCase__ : str = StableDiffusionPipeline.from_pretrained(__lowerCamelCase ) # sanity check that the pipeline still works assert pipe.safety_checker is None lowerCamelCase__ : str = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : List[str] = self.dummy_cond_unet lowerCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=__lowerCamelCase ) lowerCamelCase__ : Tuple = self.dummy_vae lowerCamelCase__ : int = self.dummy_text_encoder lowerCamelCase__ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # put models in fp16 lowerCamelCase__ : List[str] = unet.half() lowerCamelCase__ : Any = vae.half() lowerCamelCase__ : Dict = bert.half() # make sure here that pndm scheduler skips prk lowerCamelCase__ : Union[str, Any] = StableDiffusionPipeline( unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , ) lowerCamelCase__ : Optional[Any] = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Any = "A painting of a squirrel eating a burger" lowerCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : Any ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=__lowerCamelCase ) lowerCamelCase__ : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) lowerCamelCase__ : str = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Dict = ( "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle" " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with" " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and" " children from bahnhof zoo, detailed " ) lowerCamelCase__ : List[Any] = 4003660346 lowerCamelCase__ : str = 7 # without safety guidance (sld_guidance_scale = 0) lowerCamelCase__ : Optional[Any] = torch.manual_seed(__lowerCamelCase ) lowerCamelCase__ : List[Any] = sd_pipe( [prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) lowerCamelCase__ : Dict = output.images lowerCamelCase__ : Optional[int] = image[0, -3:, -3:, -1] lowerCamelCase__ : Optional[Any] = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) lowerCamelCase__ : Any 
= torch.manual_seed(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = sd_pipe( [prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) lowerCamelCase__ : Union[str, Any] = output.images lowerCamelCase__ : int = image[0, -3:, -3:, -1] lowerCamelCase__ : int = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=__lowerCamelCase ) lowerCamelCase__ : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) lowerCamelCase__ : str = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : List[Any] = "padme amidala taking a bath artwork, safe for work, no nudity" lowerCamelCase__ : Union[str, Any] = 2734971755 lowerCamelCase__ : List[Any] = 7 lowerCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase ) lowerCamelCase__ : Any = sd_pipe( [prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) lowerCamelCase__ : Optional[int] = output.images lowerCamelCase__ : str = image[0, -3:, -3:, -1] lowerCamelCase__ : str = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 lowerCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase ) lowerCamelCase__ : int = sd_pipe( [prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) lowerCamelCase__ : Optional[int] = output.images lowerCamelCase__ : Dict = image[0, -3:, -3:, -1] lowerCamelCase__ : int = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" ) lowerCamelCase__ : Union[str, Any] = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Tuple = ( "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c." 
" leyendecker" ) lowerCamelCase__ : Optional[int] = 1044355234 lowerCamelCase__ : Optional[Any] = 12 lowerCamelCase__ : Optional[Any] = torch.manual_seed(__lowerCamelCase ) lowerCamelCase__ : Any = sd_pipe( [prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) lowerCamelCase__ : Optional[Any] = output.images lowerCamelCase__ : List[str] = image[0, -3:, -3:, -1] lowerCamelCase__ : Union[str, Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 lowerCamelCase__ : Optional[Any] = torch.manual_seed(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = sd_pipe( [prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) lowerCamelCase__ : Union[str, Any] = output.images lowerCamelCase__ : Dict = image[0, -3:, -3:, -1] lowerCamelCase__ : Union[str, Any] = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = BarthezTokenizer A__ = BarthezTokenizerFast A__ = True A__ = True def lowerCAmelCase ( self : int ): '''simple docstring''' super().setUp() lowerCamelCase__ : List[str] = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Any = "<pad>" lowerCamelCase__ : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(__lowerCamelCase ) , 101122 ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowerCamelCase__ : str = [0, 57, 3018, 70307, 91, 2] lowerCamelCase__ : Tuple = self.tokenizer( __lowerCamelCase , max_length=len(__lowerCamelCase ) , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) lowerCamelCase__ : Any = batch.input_ids.tolist()[0] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCamelCase__ : Any = self.get_tokenizer() lowerCamelCase__ : Tuple = self.get_rust_tokenizer() lowerCamelCase__ : Union[str, Any] = "I was born in 92000, and this is falsé." 
lowerCamelCase__ : Dict = tokenizer.tokenize(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = rust_tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : List[str] = self.get_rust_tokenizer() lowerCamelCase__ : Optional[Any] = tokenizer.encode(__lowerCamelCase ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : int = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. lowerCamelCase__ : List[str] = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=__lowerCamelCase , )
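# --- Added usage sketch (not part of the original test suite) ---
# Beyond the slow/fast parity checks above, everyday use of the tokenizer is a plain
# encode/decode round trip; a minimal sketch, assuming network access to the Hub.
from transformers import BarthezTokenizer

tokenizer = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
ids = tokenizer.encode("A long paragraph for summarization.")  # adds <s> ... </s>
text = tokenizer.decode(ids, skip_special_tokens=True)
print(ids)   # [0, 57, 3018, 70307, 91, 2], as asserted in the batch test above
print(text)  # for this simple ASCII input the round trip is lossless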
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if the new index's result gives us a wider right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
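# --- Added worked example ---
# Searching for "abr" in "abracadabra": z_function runs over "abr" + "abracadabra",
# and every position whose Z-value reaches len("abr") == 3 starts a full match,
# giving 2 occurrences (text indices 0 and 7).
assert find_pattern("abr", "abracadabra") == 2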
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[np.ndarray, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the constant validated in the constructor
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
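# --- Added usage sketch (illustrative; file name and image size are arbitrary) ---
# The detector reads an image from disk, so this sketch synthesizes a white square
# on a black background; its four corners should yield strong Harris responses.
import cv2
import numpy as np

if __name__ == "__main__":
    synthetic = np.zeros((64, 64), dtype=np.uint8)
    synthetic[16:48, 16:48] = 255  # bright square -> four corner points
    cv2.imwrite("synthetic_square.png", synthetic)
    detector = HarrisCorner(0.04, 3)
    _, corners = detector.detect("synthetic_square.png")
    print(f"{len(corners)} pixels scored above the corner threshold")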
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(">=", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType A : List[Any] = get_logger(__name__) def lowercase_ ( _A : List[str] , _A : Dict , _A : Optional[int] , _A : int , _A : str=0 ): """simple docstring""" os.makedirs(_A , exist_ok=_A ) with FSDP.state_dict_type( _A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): lowerCamelCase__ : int = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: lowerCamelCase__ : List[str] = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin" lowerCamelCase__ : int = os.path.join(_A , _A ) if accelerator.process_index == 0: logger.info(F"Saving model to {output_model_file}" ) torch.save(_A , _A ) logger.info(F"Model saved to {output_model_file}" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: lowerCamelCase__ : List[str] = ( F"{MODEL_NAME}_rank{accelerator.process_index}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" ) lowerCamelCase__ : List[Any] = os.path.join(_A , _A ) logger.info(F"Saving model to {output_model_file}" ) torch.save(_A , _A ) logger.info(F"Model saved to {output_model_file}" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: lowerCamelCase__ : Any = os.path.join(_A , F"{MODEL_NAME}_{model_index}" ) os.makedirs(_A , exist_ok=_A ) logger.info(F"Saving model to {ckpt_dir}" ) lowerCamelCase__ : List[str] = {"model": state_dict} dist_cp.save_state_dict( state_dict=_A , storage_writer=dist_cp.FileSystemWriter(_A ) , planner=DefaultSavePlanner() , ) logger.info(F"Model saved to {ckpt_dir}" ) def lowercase_ ( _A : Union[str, Any] , _A : List[Any] , _A : List[str] , _A : Any , _A : int=0 ): """simple docstring""" accelerator.wait_for_everyone() with FSDP.state_dict_type( _A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(_A ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return lowerCamelCase__ : Optional[Any] = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin" lowerCamelCase__ : Dict = os.path.join(_A , _A ) logger.info(F"Loading model from {input_model_file}" ) lowerCamelCase__ : Union[str, Any] = torch.load(_A ) logger.info(F"Model loaded from {input_model_file}" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: lowerCamelCase__ : int = ( F"{MODEL_NAME}_rank{accelerator.process_index}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin" ) lowerCamelCase__ : Optional[Any] = os.path.join(_A , _A ) logger.info(F"Loading model from {input_model_file}" ) lowerCamelCase__ : List[Any] = torch.load(_A ) 
logger.info(F"Model loaded from {input_model_file}" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: lowerCamelCase__ : str = ( os.path.join(_A , F"{MODEL_NAME}_{model_index}" ) if F"{MODEL_NAME}" not in input_dir else input_dir ) logger.info(F"Loading model from {ckpt_dir}" ) lowerCamelCase__ : Tuple = {"model": model.state_dict()} dist_cp.load_state_dict( state_dict=_A , storage_reader=dist_cp.FileSystemReader(_A ) , planner=DefaultLoadPlanner() , ) lowerCamelCase__ : str = state_dict["model"] logger.info(F"Model loaded from {ckpt_dir}" ) model.load_state_dict(_A ) def lowercase_ ( _A : Dict , _A : str , _A : List[Any] , _A : List[str] , _A : List[Any] , _A : Tuple=0 ): """simple docstring""" os.makedirs(_A , exist_ok=_A ) with FSDP.state_dict_type( _A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): lowerCamelCase__ : Optional[int] = FSDP.optim_state_dict(_A , _A ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: lowerCamelCase__ : Union[str, Any] = ( F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin" ) lowerCamelCase__ : str = os.path.join(_A , _A ) logger.info(F"Saving Optimizer state to {output_optimizer_file}" ) torch.save(_A , _A ) logger.info(F"Optimizer state saved in {output_optimizer_file}" ) else: lowerCamelCase__ : Tuple = os.path.join(_A , F"{OPTIMIZER_NAME}_{optimizer_index}" ) os.makedirs(_A , exist_ok=_A ) logger.info(F"Saving Optimizer state to {ckpt_dir}" ) dist_cp.save_state_dict( state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_A ) , planner=DefaultSavePlanner() , ) logger.info(F"Optimizer state saved in {ckpt_dir}" ) def lowercase_ ( _A : int , _A : Tuple , _A : Optional[Any] , _A : int , _A : Any , _A : Dict=0 ): """simple docstring""" accelerator.wait_for_everyone() with FSDP.state_dict_type( _A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: lowerCamelCase__ : str = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: lowerCamelCase__ : List[Any] = ( F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin" ) lowerCamelCase__ : Optional[int] = os.path.join(_A , _A ) logger.info(F"Loading Optimizer state from {input_optimizer_file}" ) lowerCamelCase__ : Union[str, Any] = torch.load(_A ) logger.info(F"Optimizer state loaded from {input_optimizer_file}" ) else: lowerCamelCase__ : List[Any] = ( os.path.join(_A , F"{OPTIMIZER_NAME}_{optimizer_index}" ) if F"{OPTIMIZER_NAME}" not in input_dir else input_dir ) logger.info(F"Loading Optimizer from {ckpt_dir}" ) lowerCamelCase__ : Dict = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_A ) , ) lowerCamelCase__ : Any = optim_state["optimizer"] logger.info(F"Optimizer loaded from {ckpt_dir}" ) lowerCamelCase__ : Any = FSDP.optim_state_dict_to_load(_A , _A , _A ) optimizer.load_state_dict(_A )
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _lowercase : """simple docstring""" def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=13 , __lowerCamelCase : Dict=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Optional[Any]=36 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Union[str, Any]=6 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Dict=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[Any]=512 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Dict=None , ): '''simple docstring''' lowerCamelCase__ : Dict = parent lowerCamelCase__ : List[Any] = batch_size lowerCamelCase__ : Any = seq_length lowerCamelCase__ : List[str] = is_training lowerCamelCase__ : int = use_input_mask lowerCamelCase__ : List[str] = use_token_type_ids lowerCamelCase__ : int = use_labels lowerCamelCase__ : Dict = vocab_size lowerCamelCase__ : List[Any] = embedding_size lowerCamelCase__ : Dict = hidden_size lowerCamelCase__ : Any = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_hidden_groups lowerCamelCase__ : Optional[int] = num_attention_heads lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob lowerCamelCase__ : Optional[int] = max_position_embeddings lowerCamelCase__ : List[Any] = type_vocab_size lowerCamelCase__ : Optional[Any] = type_sequence_label_size lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : str = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Any = scope def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[int] = None if self.use_input_mask: lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Tuple = None lowerCamelCase__ : List[str] = None lowerCamelCase__ : int = None if self.use_labels: lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , 
self.num_labels ) lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : str ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : int = AlbertModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Any = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Any = AlbertForPreTraining(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Union[str, Any] = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , sentence_order_label=__lowerCamelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Dict = AlbertForMaskedLM(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : int ): '''simple docstring''' lowerCamelCase__ : str = AlbertForQuestionAnswering(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : str = model( __lowerCamelCase , 
attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ): '''simple docstring''' lowerCamelCase__ : int = self.num_labels lowerCamelCase__ : Optional[int] = AlbertForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.num_labels lowerCamelCase__ : List[str] = AlbertForTokenClassification(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.num_choices lowerCamelCase__ : Optional[int] = AlbertForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : int = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : int = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Union[str, Any] = config_and_inputs lowerCamelCase__ : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) A__ = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, 
"question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) A__ = True def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : Any = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class in get_values(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase ) lowerCamelCase__ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = AlbertModelTester(self ) lowerCamelCase__ : Optional[Any] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase__ : Dict = type self.model_tester.create_and_check_model(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : List[str] = AlbertModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = AlbertModel.from_pretrained("albert-base-v2" ) lowerCamelCase__ : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowerCamelCase__ : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCamelCase__ : List[Any] = model(__lowerCamelCase , 
attention_mask=__lowerCamelCase )[0] lowerCamelCase__ : Tuple = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) lowerCamelCase__ : Dict = torch.tensor( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1E-4 ) )
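# --- Added usage sketch (mirrors the integration test above; not part of the suite) ---
# A minimal end-to-end inference pass with the same checkpoint the no-head test uses.
import torch
from transformers import AlbertModel, AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
model = AlbertModel.from_pretrained("albert-base-v2")
inputs = tokenizer("Hello, ALBERT!", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768) for the base model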
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
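# --- Added illustration (not part of the original __init__) ---
# _LazyModule defers the heavy torch/tf imports until an attribute is first touched.
# A minimal sketch of the same idea using module-level __getattr__ (PEP 562); the
# module and class names here are hypothetical.
import importlib

_LAZY = {"heavy_module": ["HeavyClass"]}

def __getattr__(name):
    for module_name, symbols in _LAZY.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)  # imported only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")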
import os


def solution(filename: str = "input.txt") -> int:
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [[int(element) for element in line.split(",")] for line in input_file.readlines()]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
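# --- Added worked example ---
# solution() reads its matrix from a file, so this sketch inlines the same dynamic
# programme on a tiny in-memory matrix. Paths run from any left-column cell to any
# right-column cell, moving right, up, or down; here the middle row 1 -> 1 -> 1
# gives the minimum, 3.
def _min_path_sum(matrix: list[list[int]]) -> int:
    sums = [row[0] for row in matrix]          # cost of entering each left-column cell
    for j in range(1, len(matrix[0])):
        sums = [sums[i] + matrix[i][j] for i in range(len(matrix))]   # move right
        for i in range(1, len(matrix)):
            sums[i] = min(sums[i], sums[i - 1] + matrix[i][j])        # move down
        for i in range(len(matrix) - 2, -1, -1):
            sums[i] = min(sums[i], sums[i + 1] + matrix[i][j])        # move up
    return min(sums)

assert _min_path_sum([[1, 9, 1], [1, 1, 1], [9, 9, 9]]) == 3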
# limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class _lowercase ( lowercase__): """simple docstring""" def __init__( self : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : str ): '''simple docstring''' super().__init__() self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase ) @torch.no_grad() def __call__( self : str , __lowerCamelCase : int = 1 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : int = 50 , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , **__lowerCamelCase : List[Any] , ): '''simple docstring''' lowerCamelCase__ : List[Any] = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__lowerCamelCase , ) lowerCamelCase__ : str = image.to(self.device ) # set step values self.scheduler.set_timesteps(__lowerCamelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowerCamelCase__ : Any = self.unet(__lowerCamelCase , __lowerCamelCase ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCamelCase__ : Tuple = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample lowerCamelCase__ : Tuple = (image / 2 + 0.5).clamp(0 , 1 ) lowerCamelCase__ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCamelCase__ : str = self.numpy_to_pil(__lowerCamelCase ) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=__lowerCamelCase ), "This is a local test"
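# --- Added usage sketch (illustrative; the tiny model sizes are arbitrary) ---
# The class above is a plain DDPM-style sampling loop that tags its return value
# with an extra string. Built from any (unet, scheduler) pair, it is called like a
# standard diffusers pipeline:
from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
scheduler = DDPMScheduler(num_train_timesteps=1000)
pipe = _lowercase(unet=unet, scheduler=scheduler)
output, message = pipe(batch_size=1, num_inference_steps=10)
assert message == "This is a local test"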
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py A : Tuple = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n" A : Optional[int] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n" A : str = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _lowercase ( datasets.Metric): """simple docstring""" def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[ "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : str = compute_bleu( reference_corpus=__lowerCamelCase , translation_corpus=__lowerCamelCase , max_order=__lowerCamelCase , smooth=__lowerCamelCase ) ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : List[str] = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) A : Any = getLogger(__name__) def lowercase_ ( _A : str , _A : str , _A : str , _A : int = 8 , _A : int = 1024 , _A : List[str]="val" , _A : Tuple=None , _A : Union[str, Any]=False , _A : Dict="summarization" , _A : str=None , _A : List[Any]=1 , _A : Dict = None , _A : int="" , **_A : str , ): """simple docstring""" lowerCamelCase__ : Tuple = str(_A ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=_A ) lowerCamelCase__ : Optional[Any] = Path(_A ) lowerCamelCase__ : Tuple = save_dir.joinpath(F"rank_{local_rank}_output.json" ) torch.cuda.set_device(_A ) lowerCamelCase__ : int = AutoModelForSeqaSeqLM.from_pretrained(_A ).cuda() if fpaa: lowerCamelCase__ : Dict = model.half() # determine if we need to increase num_beams use_task_specific_params(_A , _A ) # update config with task specific params lowerCamelCase__ : Tuple = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: lowerCamelCase__ : List[Any] = num_return_sequences lowerCamelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(_A ) logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. if max_source_length is None: lowerCamelCase__ : List[str] = tokenizer.model_max_length if prefix is None: lowerCamelCase__ : Union[str, Any] = prefix or getattr(model.config , "prefix" , "" ) or "" lowerCamelCase__ : Dict = SeqaSeqDataset( _A , _A , _A , max_target_length=1024 , type_path=_A , n_obs=_A , prefix=_A , **_A , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. lowerCamelCase__ : Optional[Any] = ds.make_sortish_sampler(_A , distributed=_A , add_extra_examples=_A , shuffle=_A ) lowerCamelCase__ : str = DataLoader(_A , sampler=_A , batch_size=_A , collate_fn=ds.collate_fn ) lowerCamelCase__ : List[str] = [] for batch in tqdm(_A ): lowerCamelCase__ : Optional[Any] = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=_A , num_beams=_A , **_A , ) lowerCamelCase__ : Any = tokenizer.batch_decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A ) lowerCamelCase__ : str = batch["ids"] if num_return_sequences > 1: lowerCamelCase__ : str = chunks(_A , _A ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(_A ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(_A , _A ) return results, sampler.num_replicas def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : int = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=_A , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=_A , help="like facebook/bart-large-cnn,t5-base, etc." 
, default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=_A , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=_A , default=_A ) parser.add_argument( "--type_path" , type=_A , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=_A , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=_A , default=8 , required=_A , help="batch size" ) parser.add_argument( "--local_rank" , type=_A , default=-1 , required=_A , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=_A , default=_A , required=_A , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=_A , default=1 , required=_A , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=_A , default=600 , required=_A , help="How long should master process wait for other processes to finish." , ) parser.add_argument("--src_lang" , type=_A , default=_A , required=_A ) parser.add_argument("--tgt_lang" , type=_A , default=_A , required=_A ) parser.add_argument( "--prefix" , type=_A , required=_A , default=_A , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) lowerCamelCase__ : List[Any] = time.time() lowerCamelCase__ , lowerCamelCase__ : List[str] = parser.parse_known_args() lowerCamelCase__ : Optional[int] = parse_numeric_n_bool_cl_kwargs(_A ) if generate_kwargs and args.local_rank <= 0: print(F"parsed the following generate kwargs: {generate_kwargs}" ) lowerCamelCase__ : int = Path(args.save_dir + "_tmp" ) Path(_A ).mkdir(exist_ok=_A ) # this handles locking. lowerCamelCase__ : Any = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(F"Found files at {json_save_dir} please move or remove them." ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. 
lowerCamelCase__ : Dict = {} if args.src_lang is not None: lowerCamelCase__ : Dict = args.src_lang if args.tgt_lang is not None: lowerCamelCase__ : Any = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=_A ) lowerCamelCase__ , lowerCamelCase__ : Dict = eval_data_dir( args.data_dir , _A , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=_A , **_A , ) if args.local_rank <= 0: lowerCamelCase__ : Tuple = Path(args.save_dir ) save_dir.mkdir(exist_ok=_A ) lowerCamelCase__ : Optional[int] = gather_results_from_each_node(_A , _A , args.sync_timeout ) lowerCamelCase__ : int = combine_partial_results(_A ) if args.num_return_sequences > 1: lowerCamelCase__ : int = save_dir.joinpath("pseudolabel_results.json" ) print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" ) save_json(_A , _A ) return lowerCamelCase__ : Union[str, Any] = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(_A ) as f: lowerCamelCase__ : Union[str, Any] = [x.rstrip() for x in f.readlines()][: len(_A )] # Calculate metrics, save metrics, and save _generations.txt lowerCamelCase__ : Optional[Any] = "translation" in args.task lowerCamelCase__ : int = calculate_bleu if calc_bleu else calculate_rouge lowerCamelCase__ : List[str] = "bleu" if calc_bleu else "rouge" lowerCamelCase__ : Dict = score_fn(_A , _A ) lowerCamelCase__ : int = len(_A ) lowerCamelCase__ : Optional[int] = time.time() - start_time lowerCamelCase__ : str = round(runtime / metrics["n_obs"] , 4 ) lowerCamelCase__ : Any = num_replicas # TODO(@stas00): add whatever metadata to metrics lowerCamelCase__ : Any = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" ) save_json(_A , _A , indent=_A ) print(_A ) write_txt_file(_A , save_dir.joinpath(F"{args.type_path}_generations.txt" ) ) if args.debug: write_txt_file(_A , save_dir.joinpath(F"{args.type_path}.target" ) ) else: shutil.rmtree(_A ) def lowercase_ ( _A : List[str] ): """simple docstring""" lowerCamelCase__ : int = [] for partial_result in partial_results: records.extend(_A ) lowerCamelCase__ : Optional[int] = sorted(_A , key=lambda _A : x["id"] ) lowerCamelCase__ : Dict = [x["pred"] for x in records] return preds def lowercase_ ( _A : str , _A : str , _A : Dict ): """simple docstring""" lowerCamelCase__ : str = time.time() logger.info("waiting for all nodes to finish" ) lowerCamelCase__ : Dict = None while (time.time() - start_wait) < timeout: lowerCamelCase__ : Dict = list(save_dir.glob("rank_*.json" ) ) if len(_A ) < num_replicas: continue try: # make sure all json files are fully saved lowerCamelCase__ : Tuple = lmap(_A , _A ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
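# --- Added illustration (not part of the script) ---
# When --num_return_sequences > 1, the flat list of decoded strings is regrouped per
# input with the chunks() helper imported from utils; a minimal sketch of the
# behaviour it is assumed to have:
def chunks_sketch(lst: list, n: int) -> list[list]:
    """Split lst into consecutive pieces of size n."""
    return [lst[i : i + n] for i in range(0, len(lst), n)]

assert chunks_sketch(["a1", "a2", "b1", "b2"], 2) == [["a1", "a2"], ["b1", "b2"]]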
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f'https://google.com{link.get("href")}')
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A : Any = logging.get_logger(__name__) class _lowercase ( lowercase__ , lowercase__): """simple docstring""" A__ = "maskformer-swin" A__ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Any , __lowerCamelCase : Dict=224 , __lowerCamelCase : List[str]=4 , __lowerCamelCase : int=3 , __lowerCamelCase : int=96 , __lowerCamelCase : Tuple=[2, 2, 6, 2] , __lowerCamelCase : Tuple=[3, 6, 12, 24] , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : Union[str, Any]=4.0 , __lowerCamelCase : Any=True , __lowerCamelCase : Tuple=0.0 , __lowerCamelCase : List[str]=0.0 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Tuple=0.0_2 , __lowerCamelCase : List[Any]=1E-5 , __lowerCamelCase : Dict=None , __lowerCamelCase : Dict=None , **__lowerCamelCase : Tuple , ): '''simple docstring''' super().__init__(**__lowerCamelCase ) lowerCamelCase__ : int = image_size lowerCamelCase__ : Dict = patch_size lowerCamelCase__ : Optional[int] = num_channels lowerCamelCase__ : Optional[Any] = embed_dim lowerCamelCase__ : str = depths lowerCamelCase__ : str = len(__lowerCamelCase ) lowerCamelCase__ : Dict = num_heads lowerCamelCase__ : Optional[Any] = window_size lowerCamelCase__ : str = mlp_ratio lowerCamelCase__ : Tuple = qkv_bias lowerCamelCase__ : Optional[Any] = hidden_dropout_prob lowerCamelCase__ : List[str] = attention_probs_dropout_prob lowerCamelCase__ : Union[str, Any] = drop_path_rate lowerCamelCase__ : int = hidden_act lowerCamelCase__ : Optional[Any] = use_absolute_embeddings lowerCamelCase__ : Dict = layer_norm_eps lowerCamelCase__ : int = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCamelCase__ : List[str] = int(embed_dim * 2 ** (len(__lowerCamelCase ) - 1) ) lowerCamelCase__ : Optional[Any] = ["stem"] + [f"stage{idx}" for idx in range(1 , len(__lowerCamelCase ) + 1 )] lowerCamelCase__ , lowerCamelCase__ : int = get_aligned_output_features_output_indices( out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names )
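# --- Added worked example (not part of the module) ---
# The channel width doubles at every stage, so with the defaults embed_dim=96 and
# depths=[2, 2, 6, 2] the stage widths are 96, 192, 384, 768 and the hidden_size
# reported to VisionEncoderDecoderModel is the last one:
embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768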
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : Dict = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" ) lowerCamelCase__ : str = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" lowerCamelCase__ : Any = model(__lowerCamelCase )["last_hidden_state"] lowerCamelCase__ : List[str] = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice. lowerCamelCase__ : str = tf.convert_to_tensor( [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
import argparse

CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version: str):
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
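# --- Added illustration (not part of the script) ---
# The updater assumes custom.js contains a stable-version constant followed by a
# version table; the contents below are illustrative:
#
#   const stableVersion = "v4.20.0"
#   const versionMapping = {
#       "": "v4.20.0",
#       "v4.19.0": "v4.19.0",
#   }
#
# Invoking the script with --version 4.21.0 rewrites the stableVersion line and
# appends a `"v4.21.0": "v4.21.0",` entry just before the closing brace.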
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging A : List[Any] = logging.get_logger(__name__) A : Any = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class _lowercase ( lowercase__): """simple docstring""" A__ = "blenderbot-small" A__ = ["past_key_values"] A__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Dict , __lowerCamelCase : List[str]=50265 , __lowerCamelCase : str=512 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : str=2048 , __lowerCamelCase : str=16 , __lowerCamelCase : List[Any]=8 , __lowerCamelCase : Any=2048 , __lowerCamelCase : List[str]=16 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Dict=False , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=2 , **__lowerCamelCase : int , ): '''simple docstring''' lowerCamelCase__ : str = vocab_size lowerCamelCase__ : Union[str, Any] = max_position_embeddings lowerCamelCase__ : Union[str, Any] = d_model lowerCamelCase__ : Optional[int] = encoder_ffn_dim lowerCamelCase__ : Dict = encoder_layers lowerCamelCase__ : Any = encoder_attention_heads lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim lowerCamelCase__ : str = decoder_layers lowerCamelCase__ : Optional[Any] = decoder_attention_heads lowerCamelCase__ : List[str] = dropout lowerCamelCase__ : List[Any] = attention_dropout lowerCamelCase__ : Dict = activation_dropout lowerCamelCase__ : Optional[Any] = activation_function lowerCamelCase__ : Dict = init_std lowerCamelCase__ : List[str] = encoder_layerdrop lowerCamelCase__ : Dict = decoder_layerdrop lowerCamelCase__ : int = use_cache lowerCamelCase__ : List[Any] = encoder_layers lowerCamelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , ) class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : int = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ : Union[str, Any] = {0: "batch"} lowerCamelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowerCamelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"} lowerCamelCase__ : str = {0: "batch", 1: "decoder_sequence"} if self.use_past: 
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. lowerCamelCase__ : Tuple = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"} else: lowerCamelCase__ : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Union[str, Any] = super().outputs else: lowerCamelCase__ : int = super(__lowerCamelCase , self ).outputs if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Any = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowerCAmelCase ( self : int , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Generate decoder inputs lowerCamelCase__ : List[str] = seq_length if not self.use_past else 1 lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} lowerCamelCase__ : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : Tuple = common_inputs["input_ids"].shape lowerCamelCase__ : int = common_inputs["decoder_input_ids"].shape[1] lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.num_attention_heads lowerCamelCase__ : str = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Optional[int] = decoder_seq_length + 3 lowerCamelCase__ : Dict = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCamelCase__ : List[Any] = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Optional[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers lowerCamelCase__ : Union[str, Any] = min(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers lowerCamelCase__ : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), ) ) # TODO: test this. lowerCamelCase__ : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__lowerCamelCase , __lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) ) return common_inputs def lowerCAmelCase ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : int = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCamelCase__ : str = seqlen + 2 lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.num_layers lowerCamelCase__ , lowerCamelCase__ : int = self.num_attention_heads lowerCamelCase__ : Tuple = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Union[str, Any] = common_inputs["attention_mask"].dtype lowerCamelCase__ : List[str] = torch.cat( [common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Tuple = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase ) ] return common_inputs def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(__lowerCamelCase ) lowerCamelCase__ : Dict = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence lowerCamelCase__ : Optional[int] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCamelCase__ : Optional[Any] = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) elif self.task == "causal-lm": lowerCamelCase__ : Any = self._generate_dummy_inputs_for_causal_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) else: lowerCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Dict = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) else: lowerCamelCase__ : int = super(__lowerCamelCase , self )._flatten_past_key_values_( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
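# Hedged sketch exercising generate_dummy_inputs from the seq2seq ONNX config
# above. It assumes the standard upstream names (BlenderbotSmallConfig and
# BlenderbotSmallOnnxConfig) and needs the tokenizer checkpoint downloaded.
from transformers import AutoTokenizer, BlenderbotSmallConfig
from transformers.models.blenderbot_small.configuration_blenderbot_small import (
    BlenderbotSmallOnnxConfig,
)
from transformers.utils import TensorType

config = BlenderbotSmallConfig()
onnx_config = BlenderbotSmallOnnxConfig(config, task="default")
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")

dummy = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
# input_ids / attention_mask plus decoder_* (and past_key_values when use_past is set)
print(sorted(dummy.keys()))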
def add(first: int, second: int) -> int:
    """
    Add two non-negative integers using only bitwise operations.

    >>> add(3, 5)
    8
    >>> add(13, 5)
    18
    """
    while second != 0:
        carry = first & second  # bits that overflow into the next position
        first ^= second         # sum without the carry
        second = carry << 1     # carry shifted into place for the next round
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
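# The loop above assumes non-negative operands: with Python's unbounded ints a
# negative `second` keeps a carry alive forever. A common workaround (sketch) is
# to emulate 32-bit two's-complement arithmetic with a mask:
def add_32bit(first: int, second: int) -> int:
    mask = 0xFFFFFFFF
    while second != 0:
        first, second = (first ^ second) & mask, ((first & second) << 1) & mask
    # re-interpret the 32-bit pattern as a signed value
    return first if first <= 0x7FFFFFFF else ~(first ^ mask)


assert add_32bit(-3, 5) == 2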
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : int = logging.get_logger(__name__) A : Optional[int] = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "xmod" def __init__( self : int , __lowerCamelCase : Any=30522 , __lowerCamelCase : Any=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : str=2 , __lowerCamelCase : List[str]=0.0_2 , __lowerCamelCase : List[str]=1E-1_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str="absolute" , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Tuple=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str=("en_XX",) , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[int] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : List[Any] = num_attention_heads lowerCamelCase__ : Union[str, Any] = hidden_act lowerCamelCase__ : Optional[int] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : List[Any] = attention_probs_dropout_prob lowerCamelCase__ : Any = max_position_embeddings lowerCamelCase__ : List[Any] = type_vocab_size lowerCamelCase__ : int = initializer_range lowerCamelCase__ : Tuple = layer_norm_eps lowerCamelCase__ : Union[str, Any] = position_embedding_type lowerCamelCase__ : str = use_cache lowerCamelCase__ : Union[str, Any] = classifier_dropout lowerCamelCase__ : Any = pre_norm lowerCamelCase__ : Tuple = adapter_reduction_factor lowerCamelCase__ : Tuple = adapter_layer_norm lowerCamelCase__ : List[Any] = adapter_reuse_layer_norm lowerCamelCase__ : Dict = ln_before_adapter lowerCamelCase__ : List[Any] = list(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = default_language class _lowercase ( 
lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : Dict = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : List[str] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    Recursive bubble sort: one full pass of adjacent swaps, then recurse on the
    prefix that may still be unsorted.

    >>> bubble_sort([5, 1, 4, 2, 8])
    [1, 2, 4, 5, 8]
    >>> bubble_sort([])
    []
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
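# The recursion above is one level per pass, so an already-reversed list of n
# items makes n nested calls; for large n an iterative driver avoids Python's
# recursion limit (a sketch using the same pass logic):
def bubble_sort_iterative(list_data: list) -> list:
    for length in range(len(list_data), 1, -1):
        swapped = False
        for i in range(length - 1):
            if list_data[i] > list_data[i + 1]:
                list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
                swapped = True
        if not swapped:
            break
    return list_data


assert bubble_sort_iterative([3, 2, 1]) == [1, 2, 3]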
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _lowercase : """simple docstring""" def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=0.0_2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]=None , ): '''simple docstring''' lowerCamelCase__ : Tuple = parent lowerCamelCase__ : int = batch_size lowerCamelCase__ : List[Any] = seq_length lowerCamelCase__ : Union[str, Any] = is_training lowerCamelCase__ : Any = use_token_type_ids lowerCamelCase__ : Union[str, Any] = use_labels lowerCamelCase__ : List[str] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : List[Any] = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : str = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : List[str] = max_position_embeddings lowerCamelCase__ : Optional[int] = type_vocab_size lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : List[str] = initializer_range lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Optional[Any] = scope lowerCamelCase__ : List[Any] = self.vocab_size - 1 def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Any = None lowerCamelCase__ : str = None lowerCamelCase__ : str = None if self.use_labels: lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCamelCase__ : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, 
sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , *__lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = OpenAIGPTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : str = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTLMHeadModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTDoubleHeadsModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Optional[Any] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Dict = self.num_labels lowerCamelCase__ : Tuple = OpenAIGPTForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : str = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Any = config_and_inputs lowerCamelCase__ : Union[str, Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) A__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly A__ = ( { "feature-extraction": OpenAIGPTModel, 
"text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ): '''simple docstring''' lowerCamelCase__ : Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCamelCase__ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Tuple = inputs_dict["labels"] lowerCamelCase__ : Any = inputs_dict["labels"] lowerCamelCase__ : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 ) def lowerCAmelCase ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*__lowerCamelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Any = OpenAIGPTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(__lowerCamelCase ) lowerCamelCase__ : int = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__lowerCamelCase ) # the president is lowerCamelCase__ : Union[str, Any] = [ 481, 4735, 544, 246, 963, 870, 762, 
239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowerCamelCase__ : int = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase ) self.assertListEqual(output_ids[0].tolist() , __lowerCamelCase )
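# Hedged end-to-end sketch mirroring the integration test above: greedy decoding
# from the "openai-gpt" checkpoint (requires the model download).
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")

input_ids = tokenizer("the president is", return_tensors="pt").input_ids  # [[481, 4735, 544]]
output_ids = model.generate(input_ids, do_sample=False, max_length=20)
print(tokenizer.decode(output_ids[0]))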
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with multiple key codes so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """
    Metaclass that collects every method marked with a key code into `key_handler`
    and attaches a dispatching `handle_input`.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        return None


def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
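# Hedged usage sketch for the machinery above (assumes the sibling `keymap`
# module is importable, as in accelerate's menu implementation). Methods take
# the class itself as first argument, mirroring how handle_input dispatches:
@register
class DemoMenu:
    @mark(KEYMAP["down"])
    def move_down(cls):
        print("down")

    @mark_multiple(KEYMAP["up"], ord("k"))
    def move_up(cls):
        print("up")


# DemoMenu.handle_input(DemoMenu) reads one keystroke via get_character() and
# calls whichever method was marked with that key, returning None otherwise.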
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : Dict = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class _lowercase ( lowercase__): """simple docstring""" A__ = "ibert" def __init__( self : int , __lowerCamelCase : List[str]=30522 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : str=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : Any=1E-1_2 , __lowerCamelCase : int=1 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : int=2 , __lowerCamelCase : int="absolute" , __lowerCamelCase : Tuple=False , __lowerCamelCase : Dict="none" , **__lowerCamelCase : Tuple , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Any = vocab_size lowerCamelCase__ : Optional[Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : int = num_attention_heads lowerCamelCase__ : List[str] = hidden_act lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : Any = type_vocab_size lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : Tuple = layer_norm_eps lowerCamelCase__ : int = position_embedding_type lowerCamelCase__ : List[str] = quant_mode lowerCamelCase__ : int = force_dequant class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : Any = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
from __future__ import annotations import time import numpy as np A : Dict = [8, 5, 9, 7] A : Optional[Any] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] A : Any = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class _lowercase : """simple docstring""" def __init__( self : str , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ): '''simple docstring''' lowerCamelCase__ : int = claim_vector lowerCamelCase__ : str = allocated_resources_table lowerCamelCase__ : int = maximum_claim_table def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return {self.__need().index(__lowerCamelCase ): i for i in self.__need()} def lowerCAmelCase ( self : List[str] , **__lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.__need() lowerCamelCase__ : str = self.__allocated_resources_table lowerCamelCase__ : List[Any] = self.__available_resources() lowerCamelCase__ : str = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n" ) while need_list: lowerCamelCase__ : int = False for each_need in need_list: lowerCamelCase__ : Dict = True for index, need in enumerate(__lowerCamelCase ): if need > available_resources[index]: lowerCamelCase__ : str = False break if execution: lowerCamelCase__ : Tuple = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: lowerCamelCase__ : Any = original_need_index print(f"Process {process_number + 1} is executing." ) # remove the process run from stack need_list.remove(__lowerCamelCase ) # update available/freed resources stack lowerCamelCase__ : Union[str, Any] = np.array(__lowerCamelCase ) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(__lowerCamelCase ) for x in available_resources] ) ) break if safe: print("The process is in a safe state.\n" ) else: print("System in unsafe state. Aborting...\n" ) break def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' print(" " * 9 + "Allocated Resource Table" ) for item in self.__allocated_resources_table: print( f"P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print(" " * 9 + "System Resource Table" ) for item in self.__maximum_claim_table: print( f"P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) ) print( "Initial Available Resources: " + " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
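# Self-contained sketch of the core safety test above: a process may run only if
# its remaining need fits inside the currently available resource vector.
import numpy as np

claim = np.array([8, 5, 9, 7])
alloc = np.array([[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]])
maximum = np.array([[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]])

available = claim - alloc.sum(axis=0)  # resources not currently allocated
need = maximum - alloc                 # what each process may still request
runnable = [i for i in range(len(need)) if (need[i] <= available).all()]
print(available, runnable)             # [1 2 2 2] [2] -- only process index 2 can run first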
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Dict = logging.get_logger(__name__) A : Union[str, Any] = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "roberta" def __init__( self : int , __lowerCamelCase : Dict=50265 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : int=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : Optional[int]=1E-1_2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : int=0 , __lowerCamelCase : Any=2 , __lowerCamelCase : Tuple="absolute" , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=None , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : List[Any] = vocab_size lowerCamelCase__ : str = hidden_size lowerCamelCase__ : int = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : Tuple = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : int = type_vocab_size lowerCamelCase__ : Any = initializer_range lowerCamelCase__ : Dict = layer_norm_eps lowerCamelCase__ : int = position_embedding_type lowerCamelCase__ : Any = use_cache lowerCamelCase__ : int = classifier_dropout class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : int = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : Optional[Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : str = logging.get_logger(__name__) A : Dict = { "facebook/data2vec-vision-base-ft": ( "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json" ), } class _lowercase ( lowercase__): """simple docstring""" A__ = "data2vec-vision" def __init__( self : Dict , __lowerCamelCase : List[str]=768 , __lowerCamelCase : int=12 , __lowerCamelCase : Union[str, Any]=12 , __lowerCamelCase : Dict=3072 , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Optional[int]=0.0 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : Dict=1E-1_2 , __lowerCamelCase : str=224 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : str=[3, 5, 7, 11] , __lowerCamelCase : str=[1, 2, 3, 6] , __lowerCamelCase : int=True , __lowerCamelCase : Any=0.4 , __lowerCamelCase : Union[str, Any]=256 , __lowerCamelCase : int=1 , __lowerCamelCase : List[str]=False , __lowerCamelCase : Dict=255 , **__lowerCamelCase : Any , ): '''simple docstring''' super().__init__(**__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : Optional[int] = num_attention_heads lowerCamelCase__ : Dict = intermediate_size lowerCamelCase__ : int = hidden_act lowerCamelCase__ : List[str] = hidden_dropout_prob lowerCamelCase__ : str = attention_probs_dropout_prob lowerCamelCase__ : Any = initializer_range lowerCamelCase__ : Dict = layer_norm_eps lowerCamelCase__ : List[Any] = image_size lowerCamelCase__ : Tuple = patch_size lowerCamelCase__ : List[str] = num_channels lowerCamelCase__ : List[str] = use_mask_token lowerCamelCase__ : Tuple = use_absolute_position_embeddings lowerCamelCase__ : Optional[int] = use_relative_position_bias lowerCamelCase__ : str = use_shared_relative_position_bias lowerCamelCase__ : List[str] = layer_scale_init_value lowerCamelCase__ : List[str] = drop_path_rate lowerCamelCase__ : Optional[int] = use_mean_pooling # decode head attributes (semantic segmentation) lowerCamelCase__ : Dict = out_indices lowerCamelCase__ : Union[str, Any] = pool_scales # auxiliary head attributes (semantic segmentation) lowerCamelCase__ : str = use_auxiliary_head lowerCamelCase__ : Optional[int] = auxiliary_loss_weight lowerCamelCase__ : List[str] = auxiliary_channels lowerCamelCase__ : List[str] = auxiliary_num_convs lowerCamelCase__ : str = auxiliary_concat_input lowerCamelCase__ : Any = semantic_loss_ignore_index class _lowercase ( lowercase__): """simple docstring""" A__ = version.parse("1.11") @property def lowerCAmelCase ( self : Dict ): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return 1E-4
import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features A : Union[str, Any] = logging.get_logger(__name__) A : Union[str, Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) A : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _lowercase : """simple docstring""" A__ = field( default=lowercase__ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase__)}) A__ = field( default=lowercase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}) A__ = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ = field( default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) A__ = field( default=64 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) A__ = field( default=30 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) } , ) A__ = field( default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"}) A__ = field( default=lowercase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}) A__ = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"}) class _lowercase ( lowercase__): """simple docstring""" A__ = "train" A__ = "dev" class _lowercase ( lowercase__): """simple docstring""" A__ = 42 A__ = 42 A__ = 42 A__ = 42 def __init__( self : Optional[int] , __lowerCamelCase : SquadDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "pt" , ): '''simple docstring''' lowerCamelCase__ : List[str] = args lowerCamelCase__ : Tuple = is_language_sensitive lowerCamelCase__ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__lowerCamelCase , __lowerCamelCase ): try: lowerCamelCase__ : List[str] = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) lowerCamelCase__ : str = mode # Load data features from cache or dataset file lowerCamelCase__ : Any = "v2" if args.version_2_with_negative else "v1" lowerCamelCase__ : List[str] = os.path.join( cache_dir if 
cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCamelCase__ : List[str] = cached_features_file + ".lock" with FileLock(__lowerCamelCase ): if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache: lowerCamelCase__ : str = time.time() lowerCamelCase__ : Tuple = torch.load(__lowerCamelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowerCamelCase__ : Optional[Any] = self.old_features["features"] lowerCamelCase__ : Optional[int] = self.old_features.get("dataset" , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = self.old_features.get("examples" , __lowerCamelCase ) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in" " future run" ) else: if mode == Split.dev: lowerCamelCase__ : List[Any] = self.processor.get_dev_examples(args.data_dir ) else: lowerCamelCase__ : str = self.processor.get_train_examples(args.data_dir ) lowerCamelCase__ , lowerCamelCase__ : Tuple = squad_convert_examples_to_features( examples=self.examples , tokenizer=__lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCamelCase , ) lowerCamelCase__ : int = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__( self : List[Any] ): '''simple docstring''' return len(self.features ) def __getitem__( self : List[str] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = self.features[i] lowerCamelCase__ : Tuple = torch.tensor(feature.input_ids , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long ) lowerCamelCase__ : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.cls_index , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.p_mask , dtype=torch.float ) lowerCamelCase__ : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float ) lowerCamelCase__ : List[str] = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask} ) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible} ) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowerCamelCase__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"start_positions": start_positions, "end_positions": end_positions} ) return inputs
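# Small sketch of the cache naming used above: features are cached per split
# (mode.value), tokenizer class, max sequence length and SQuAD version, with a
# FileLock guarding concurrent writers. Path and values here are illustrative.
import os

mode, tokenizer_cls, max_seq_length, version_tag = "train", "BertTokenizer", 384, "v2"
cached = os.path.join("/tmp/squad", f"cached_{mode}_{tokenizer_cls}_{max_seq_length}_{version_tag}")
print(cached)  # /tmp/squad/cached_train_BertTokenizer_384_v2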
def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return min_val when option is True, else max_val (input validation helper)."""
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_a: int, number_b: int) -> int:
    """Midpoint of two numbers, truncated to an int."""
    return int((number_a + number_b) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Bisect the [lower, higher] range until to_guess is found."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
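# Non-interactive sketch: guess_the_number is plain bisection, so it converges in
# at most ceil(log2(higher - lower)) probes. For example:
guess_the_number(10, 1000, 17)
# started...
# guess the number : 17
# details : [505, 257, 133, 71, 40, 25, 17]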
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging A : Tuple = logging.get_logger(__name__) A : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all LED models at https://huggingface.co/models?filter=LED A : int = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } A : Union[str, Any] = { "allenai/led-base-16384": 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : str = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) lowerCamelCase__ : Any = bs[:] lowerCamelCase__ : Union[str, Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(_A ) cs.append(2**8 + n ) n += 1 lowerCamelCase__ : Any = [chr(_A ) for n in cs] return dict(zip(_A , _A ) ) def lowercase_ ( _A : Any ): """simple docstring""" lowerCamelCase__ : Union[str, Any] = set() lowerCamelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase__ : Any = char return pairs class _lowercase ( lowercase__): """simple docstring""" A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ["input_ids", "attention_mask"] def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token lowerCamelCase__ : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token lowerCamelCase__ : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token lowerCamelCase__ : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token lowerCamelCase__ : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: lowerCamelCase__ : Union[str, Any] = json.load(__lowerCamelCase ) lowerCamelCase__ : List[str] = {v: k for k, v in self.encoder.items()} lowerCamelCase__ : Union[str, Any] = errors # how to handle errors in decoding lowerCamelCase__ : List[Any] = bytes_to_unicode() lowerCamelCase__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: lowerCamelCase__ : List[Any] = merges_handle.read().split("\n" )[1:-1] lowerCamelCase__ : str = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase__ : Optional[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) lowerCamelCase__ : List[Any] = {} lowerCamelCase__ : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase__ : List[str] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return len(self.encoder ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Dict ): '''simple docstring''' if token in self.cache: return self.cache[token] lowerCamelCase__ : Union[str, Any] = tuple(__lowerCamelCase ) lowerCamelCase__ : Tuple = get_pairs(__lowerCamelCase ) if not pairs: return token while True: lowerCamelCase__ : str = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = bigram lowerCamelCase__ : int = [] lowerCamelCase__ : int = 0 while i < len(__lowerCamelCase ): try: lowerCamelCase__ : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase__ : List[str] = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase__ : Dict = tuple(__lowerCamelCase ) lowerCamelCase__ : str = new_word if len(__lowerCamelCase ) == 1: break else: lowerCamelCase__ : List[str] = get_pairs(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = " ".join(__lowerCamelCase ) lowerCamelCase__ : Dict = word return word def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = [] for token in re.findall(self.pat , __lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in 
self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def lowerCAmelCase ( self : Any , __lowerCamelCase : int ): '''simple docstring''' return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' return self.decoder.get(__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = "".join(__lowerCamelCase ) lowerCamelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCamelCase__ : List[Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase__ : Union[str, Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) lowerCamelCase__ : Tuple = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" 
) lowerCamelCase__ : List[Any] = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ : List[str] = [self.cls_token_id] lowerCamelCase__ : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowerCamelCase__ : Any = [self.sep_token_id] lowerCamelCase__ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict=False , **__lowerCamelCase : List[str] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): lowerCamelCase__ : Dict = " " + text return (text, kwargs) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ): '''simple docstring''' lowerCamelCase__ : str = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: lowerCamelCase__ : str = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCamelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCamelCase__ : Union[str, Any] = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: lowerCamelCase__ : Dict = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCamelCase__ : Optional[int] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowerCamelCase__ : Union[str, Any] = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
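For orientation, a short usage sketch of the byte-level BPE tokenizer defined above, via the released "allenai/led-base-16384" checkpoint (assumes `transformers` and `torch` are installed and the Hub is reachable); the global_attention_mask handling mirrors the custom _pad override at the end of the class:

import torch
from transformers import LEDTokenizer

tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
batch = tokenizer(["A long document to summarize."], padding=True, return_tensors="pt")

# LED additionally accepts a global_attention_mask; the custom _pad above keeps it
# aligned with input_ids when sequences are padded (padding with -1, not 0).
global_attention_mask = torch.zeros_like(batch["input_ids"])
global_attention_mask[:, 0] = 1  # global attention on the first (<s>) token
batch["global_attention_mask"] = global_attention_mask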
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = BarthezTokenizer A__ = BarthezTokenizerFast A__ = True A__ = True def lowerCAmelCase ( self : int ): '''simple docstring''' super().setUp() lowerCamelCase__ : List[str] = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Any = "<pad>" lowerCamelCase__ : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(__lowerCamelCase ) , 101122 ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowerCamelCase__ : str = [0, 57, 3018, 70307, 91, 2] lowerCamelCase__ : Tuple = self.tokenizer( __lowerCamelCase , max_length=len(__lowerCamelCase ) , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) lowerCamelCase__ : Any = batch.input_ids.tolist()[0] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCamelCase__ : Any = self.get_tokenizer() lowerCamelCase__ : Tuple = self.get_rust_tokenizer() lowerCamelCase__ : Union[str, Any] = "I was born in 92000, and this is falsé." 
lowerCamelCase__ : Dict = tokenizer.tokenize(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = rust_tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : List[str] = self.get_rust_tokenizer() lowerCamelCase__ : Optional[Any] = tokenizer.encode(__lowerCamelCase ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : int = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. lowerCamelCase__ : List[str] = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=__lowerCamelCase , )
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = KandinskyVaaImgaImgPipeline A__ = ["image_embeds", "negative_image_embeds", "image"] A__ = [ "image_embeds", "negative_image_embeds", "image", ] A__ = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] A__ = False @property def lowerCAmelCase ( self : Dict ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return 100 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : Optional[Any] = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } lowerCamelCase__ : Tuple = UNetaDConditionModel(**__lowerCamelCase ) return model @property def lowerCAmelCase ( self : int ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : int = VQModel(**self.dummy_movq_kwargs ) return model def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.dummy_unet lowerCamelCase__ : Optional[Any] = self.dummy_movq lowerCamelCase__ : Optional[int] = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_0_0_8_5, "beta_end": 0.0_1_2, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } lowerCamelCase__ : List[Any] = DDIMScheduler(**__lowerCamelCase ) lowerCamelCase__ : Tuple = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , 
__lowerCamelCase : int=0 ): '''simple docstring''' lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowerCamelCase ) # create init_image lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ : Optional[int] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) ) if str(__lowerCamelCase ).startswith("mps" ): lowerCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase ) else: lowerCamelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) lowerCamelCase__ : Tuple = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Dict = "cpu" lowerCamelCase__ : str = self.get_dummy_components() lowerCamelCase__ : Optional[int] = self.pipeline_class(**__lowerCamelCase ) lowerCamelCase__ : List[str] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) ) lowerCamelCase__ : List[str] = output.images lowerCamelCase__ : Optional[Any] = pipe( **self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0] lowerCamelCase__ : int = image[0, -3:, -3:, -1] lowerCamelCase__ : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase__ : str = np.array( [0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy" ) lowerCamelCase__ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) lowerCamelCase__ : Any = "A red cartoon frog, 4k" lowerCamelCase__ : str = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(__lowerCamelCase ) lowerCamelCase__ : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa ) lowerCamelCase__ : str = pipeline.to(__lowerCamelCase ) pipeline.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Tuple = 
torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase__ , lowerCamelCase__ : List[str] = pipe_prior( __lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() lowerCamelCase__ : Optional[Any] = pipeline( image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , ) lowerCamelCase__ : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
def lowercase_(principal: float, rate_per_annum: float, years_to_repay: int):
    """simple docstring"""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
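A quick sanity check of the formula with illustrative figures:

emi = lowercase_(100000, 0.10, 2)
print(round(emi, 2))  # monthly payment for 100000 at 10% p.a. over 2 years, roughly 4614.5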
def lowercase_(a: int, b: int):
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
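Two spot checks, comparing the string result against Python's built-in ^ operator:

assert lowercase_(5, 3) == "0b110"            # 101 XOR 011 -> 110
assert int(lowercase_(25, 32), 2) == 25 ^ 32  # agrees with the built-in operator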
import os


def solution(file_path: str = "input.txt"):
    """simple docstring"""
    with open(os.path.join(os.path.dirname(file_path), file_path)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # First pass: reach column j by moving right from column j - 1.
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        # Second pass: allow downward moves within column j.
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        # Third pass: allow upward moves within column j.
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
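To see the three-pass column sweep at work, a small self-contained check (hypothetical temporary file; the cheapest left-to-right path through this matrix is 1 + 2 + 3 = 6):

import tempfile

matrix_text = "1,2,3\n9,9,9\n9,9,9\n"
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write(matrix_text)
print(solution(tmp.name))  # 6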
import os
from pathlib import Path


def lowercase_():
    """simple docstring"""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A : str = logging.get_logger(__name__) A : Tuple = {"vocab_file": "spiece.model"} A : Union[str, Any] = { "vocab_file": { "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model", "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model", "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model", "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model", "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model", "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model", "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model", "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model", } } A : Optional[Any] = { "albert-base-v1": 512, "albert-large-v1": 512, "albert-xlarge-v1": 512, "albert-xxlarge-v1": 512, "albert-base-v2": 512, "albert-large-v2": 512, "albert-xlarge-v2": 512, "albert-xxlarge-v2": 512, } A : List[Any] = "▁" class _lowercase ( lowercase__): """simple docstring""" A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : Union[str, Any]="[CLS]" , __lowerCamelCase : List[str]="[SEP]" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Optional[Any]="[SEP]" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : Optional[Any]="[CLS]" , __lowerCamelCase : List[str]="[MASK]" , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Optional[int] , ): '''simple docstring''' lowerCamelCase__ : int = ( AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase , normalized=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token ) lowerCamelCase__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , ) lowerCamelCase__ : Union[str, Any] = do_lower_case lowerCamelCase__ : Optional[Any] = remove_space lowerCamelCase__ : int = keep_accents lowerCamelCase__ : Dict = vocab_file lowerCamelCase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCamelCase ) @property def lowerCAmelCase ( self : int ): '''simple docstring''' return len(self.sp_model ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Dict = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.__dict__.copy() 
lowerCamelCase__ : Optional[int] = None return state def __setstate__( self : Tuple , __lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Any = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowerCamelCase__ : Dict = {} lowerCamelCase__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Tuple ): '''simple docstring''' if self.remove_space: lowerCamelCase__ : List[Any] = " ".join(inputs.strip().split() ) else: lowerCamelCase__ : str = inputs lowerCamelCase__ : List[str] = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: lowerCamelCase__ : Tuple = unicodedata.normalize("NFKD" , __lowerCamelCase ) lowerCamelCase__ : str = "".join([c for c in outputs if not unicodedata.combining(__lowerCamelCase )] ) if self.do_lower_case: lowerCamelCase__ : Any = outputs.lower() return outputs def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.preprocess_text(__lowerCamelCase ) lowerCamelCase__ : str = self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase ) lowerCamelCase__ : Tuple = [] for piece in pieces: if len(__lowerCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): lowerCamelCase__ : int = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCamelCase , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCamelCase__ : List[Any] = cur_pieces[1:] else: lowerCamelCase__ : Tuple = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__lowerCamelCase ) else: new_pieces.append(__lowerCamelCase ) return new_pieces def lowerCAmelCase ( self : List[str] , __lowerCamelCase : List[Any] ): '''simple docstring''' return self.sp_model.PieceToId(__lowerCamelCase ) def lowerCAmelCase ( self : int , __lowerCamelCase : int ): '''simple docstring''' return self.sp_model.IdToPiece(__lowerCamelCase ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[str] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = [] lowerCamelCase__ : Dict = "" lowerCamelCase__ : int = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__lowerCamelCase ) + token lowerCamelCase__ : Dict = True lowerCamelCase__ : Optional[Any] = [] else: current_sub_tokens.append(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = False out_string += self.sp_model.decode(__lowerCamelCase ) return out_string.strip() def lowerCAmelCase ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowerCamelCase__ : Tuple = [self.sep_token_id] lowerCamelCase__ : List[str] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is not None: return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + 
[1] return [1] + ([0] * len(__lowerCamelCase )) + [1] def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowerCamelCase__ : int = [self.sep_token_id] lowerCamelCase__ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCamelCase__ : str = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase , "wb" ) as fi: lowerCamelCase__ : Tuple = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,)
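As a usage sketch of the SentencePiece tokenizer above (assumes the `sentencepiece` package and access to the "albert-base-v2" checkpoint on the Hub):

from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
text = "SentencePiece splits words into subword pieces."
tokens = tokenizer.tokenize(text)
ids = tokenizer.encode(text)  # adds [CLS] ... [SEP]
print(tokens)
print(tokenizer.decode(ids))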
from __future__ import annotations def lowercase_ ( _A : str , _A : list[str] | None = None , _A : dict[str, float] | None = None , _A : bool = False , ): """simple docstring""" lowerCamelCase__ : Tuple = cipher_alphabet or [chr(_A ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) lowerCamelCase__ : Dict = { "a": 0.08_497, "b": 0.01_492, "c": 0.02_202, "d": 0.04_253, "e": 0.11_162, "f": 0.02_228, "g": 0.02_015, "h": 0.06_094, "i": 0.07_546, "j": 0.00_153, "k": 0.01_292, "l": 0.04_025, "m": 0.02_406, "n": 0.06_749, "o": 0.07_507, "p": 0.01_929, "q": 0.00_095, "r": 0.07_587, "s": 0.06_327, "t": 0.09_356, "u": 0.02_758, "v": 0.00_978, "w": 0.02_560, "x": 0.00_150, "y": 0.01_994, "z": 0.00_077, } else: # Custom frequencies dictionary lowerCamelCase__ : Optional[int] = frequencies_dict if not case_sensitive: lowerCamelCase__ : str = ciphertext.lower() # Chi squared statistic values lowerCamelCase__ : dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(_A ) ): lowerCamelCase__ : Optional[Any] = "" # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet lowerCamelCase__ : Dict = (alphabet_letters.index(letter.lower() ) - shift) % len( _A ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter lowerCamelCase__ : str = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: lowerCamelCase__ : List[str] = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message lowerCamelCase__ : List[str] = decrypted_with_shift.lower().count(_A ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowerCamelCase__ : List[Any] = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowerCamelCase__ : str = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message lowerCamelCase__ : Any = decrypted_with_shift.count(_A ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowerCamelCase__ : str = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowerCamelCase__ : int = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary lowerCamelCase__ : Optional[int] = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(_A : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] lowerCamelCase__ : int = min( _A , key=_A , ) # Get all the data from the most likely cipher (key, decoded message) ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : int = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, 
most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
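To make the scoring step concrete, a minimal self-contained sketch of a chi-squared letter-frequency score (illustrative three-letter frequency subset; the function above computes a close variant over the full alphabet and picks the shift with the smallest value):

FREQS = {"e": 0.11162, "t": 0.09356, "a": 0.08497}  # subset, for illustration


def chi_squared_score(text: str) -> float:
    score = 0.0
    for letter, rel_freq in FREQS.items():
        occurrences = text.count(letter)
        expected = rel_freq * len(text)
        score += (occurrences - expected) ** 2 / expected
    return score


# Candidate decryptions are ranked by this value: lower means the letter
# counts are closer to typical English.
print(chi_squared_score("attack at dawn"))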
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
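For context, `_LazyModule` defers the heavy submodule imports until an exported name is first accessed. A simplified sketch of the idea (not the actual `transformers` implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Import submodules only when one of their attributes is first accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value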
def lowercase_(number: int):
    """simple docstring"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
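The predicate above recognizes automorphic numbers, whose squares end in the number itself:

print([n for n in range(1000) if lowercase_(n)])  # [0, 1, 5, 6, 25, 76, 376, 625]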
import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() A : Dict = logging.get_logger(__name__) def lowercase_ ( _A : Optional[int] , _A : Any , _A : int ): """simple docstring""" lowerCamelCase__ : List[Any] = UniSpeechSatForSequenceClassification.from_pretrained(_A , config=_A ) lowerCamelCase__ : Optional[int] = downstream_dict["projector.weight"] lowerCamelCase__ : Optional[int] = downstream_dict["projector.bias"] lowerCamelCase__ : int = downstream_dict["model.post_net.linear.weight"] lowerCamelCase__ : Optional[Any] = downstream_dict["model.post_net.linear.bias"] return model def lowercase_ ( _A : str , _A : List[Any] , _A : List[str] ): """simple docstring""" lowerCamelCase__ : Tuple = UniSpeechSatForAudioFrameClassification.from_pretrained(_A , config=_A ) lowerCamelCase__ : List[str] = downstream_dict["model.linear.weight"] lowerCamelCase__ : Optional[int] = downstream_dict["model.linear.bias"] return model def lowercase_ ( _A : Optional[int] , _A : str , _A : Optional[Any] ): """simple docstring""" lowerCamelCase__ : int = UniSpeechSatForXVector.from_pretrained(_A , config=_A ) lowerCamelCase__ : List[Any] = downstream_dict["connector.weight"] lowerCamelCase__ : int = downstream_dict["connector.bias"] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): lowerCamelCase__ : Optional[Any] = downstream_dict[ F"model.framelevel_feature_extractor.module.{i}.kernel.weight" ] lowerCamelCase__ : Dict = downstream_dict[F"model.framelevel_feature_extractor.module.{i}.kernel.bias"] lowerCamelCase__ : str = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"] lowerCamelCase__ : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"] lowerCamelCase__ : Dict = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"] lowerCamelCase__ : Optional[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"] lowerCamelCase__ : Dict = downstream_dict["objective.W"] return model @torch.no_grad() def lowercase_ ( _A : str , _A : Tuple , _A : str , _A : Tuple ): """simple docstring""" lowerCamelCase__ : List[str] = torch.load(_A , map_location="cpu" ) lowerCamelCase__ : Dict = checkpoint["Downstream"] lowerCamelCase__ : List[Any] = UniSpeechSatConfig.from_pretrained(_A ) lowerCamelCase__ : Dict = WavaVecaFeatureExtractor.from_pretrained( _A , return_attention_mask=_A , do_normalize=_A ) lowerCamelCase__ : int = hf_config.architectures[0] if arch.endswith("ForSequenceClassification" ): lowerCamelCase__ : str = convert_classification(_A , _A , _A ) elif arch.endswith("ForAudioFrameClassification" ): lowerCamelCase__ : Optional[Any] = convert_diarization(_A , _A , _A ) elif arch.endswith("ForXVector" ): lowerCamelCase__ : Optional[Any] = convert_xvector(_A , _A , _A ) else: raise NotImplementedError(F"S3PRL weights conversion is not supported for {arch}" ) if hf_config.use_weighted_layer_sum: lowerCamelCase__ : Optional[Any] = checkpoint["Featurizer"]["weights"] hf_feature_extractor.save_pretrained(_A ) hf_model.save_pretrained(_A ) if __name__ == "__main__": A : Any = argparse.ArgumentParser() parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." 
) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") A : List[Any] = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) A : Optional[int] = { "configuration_speecht5": [ "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP", "SpeechT5Config", "SpeechT5HifiGanConfig", ], "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"], "processing_speecht5": ["SpeechT5Processor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[Any] = ["SpeechT5Tokenizer"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[str] = [ "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST", "SpeechT5ForSpeechToText", "SpeechT5ForSpeechToSpeech", "SpeechT5ForTextToSpeech", "SpeechT5Model", "SpeechT5PreTrainedModel", "SpeechT5HifiGan", ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys A : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations


def bucket_sort(my_list: list):
    """simple docstring"""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
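A randomized cross-check against the built-in sort:

import random

data = [random.randint(-50, 50) for _ in range(100)]
assert bucket_sort(data) == sorted(data)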
from __future__ import annotations import time import numpy as np A : Dict = [8, 5, 9, 7] A : Optional[Any] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] A : Any = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class _lowercase : """simple docstring""" def __init__( self : str , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ): '''simple docstring''' lowerCamelCase__ : int = claim_vector lowerCamelCase__ : str = allocated_resources_table lowerCamelCase__ : int = maximum_claim_table def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return {self.__need().index(__lowerCamelCase ): i for i in self.__need()} def lowerCAmelCase ( self : List[str] , **__lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.__need() lowerCamelCase__ : str = self.__allocated_resources_table lowerCamelCase__ : List[Any] = self.__available_resources() lowerCamelCase__ : str = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n" ) while need_list: lowerCamelCase__ : int = False for each_need in need_list: lowerCamelCase__ : Dict = True for index, need in enumerate(__lowerCamelCase ): if need > available_resources[index]: lowerCamelCase__ : str = False break if execution: lowerCamelCase__ : Tuple = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: lowerCamelCase__ : Any = original_need_index print(f"Process {process_number + 1} is executing." ) # remove the process run from stack need_list.remove(__lowerCamelCase ) # update available/freed resources stack lowerCamelCase__ : Union[str, Any] = np.array(__lowerCamelCase ) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(__lowerCamelCase ) for x in available_resources] ) ) break if safe: print("The process is in a safe state.\n" ) else: print("System in unsafe state. Aborting...\n" ) break def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' print(" " * 9 + "Allocated Resource Table" ) for item in self.__allocated_resources_table: print( f"P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print(" " * 9 + "System Resource Table" ) for item in self.__maximum_claim_table: print( f"P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) ) print( "Initial Available Resources: " + " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
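As a compact, self-contained sketch of the safety check the class above performs (simplified: greedily run any process whose remaining need fits in the available pool, then reclaim its allocation; the data reuses the module's tables, with available computed as the claim vector minus the column sums of allocations):

import numpy as np


def is_safe_state(available, allocated, maximum) -> bool:
    need = np.array(maximum) - np.array(allocated)
    work = np.array(available, dtype=int)
    finished = [False] * len(allocated)
    progress = True
    while progress:
        progress = False
        for i, done in enumerate(finished):
            if not done and all(need[i] <= work):
                work = work + np.array(allocated[i])  # process i completes and releases resources
                finished[i] = True
                progress = True
    return all(finished)


allocated = [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]]
maximum = [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]]
print(is_safe_state([1, 2, 2, 2], allocated, maximum))  # True: a safe ordering exists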
INSTALL_CONTENT = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging A : Optional[int] = logging.get_logger(__name__) A : str = { "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class _lowercase ( lowercase__): """simple docstring""" A__ = "sew-d" def __init__( self : List[str] , __lowerCamelCase : Tuple=32 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Any=3072 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : int=512 , __lowerCamelCase : Dict=256 , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Dict=("p2c", "c2p") , __lowerCamelCase : str="layer_norm" , __lowerCamelCase : Optional[Any]="gelu_python" , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : str=0.0 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=0.0_2 , __lowerCamelCase : List[str]=1E-7 , __lowerCamelCase : Dict=1E-5 , __lowerCamelCase : List[Any]="group" , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Dict=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __lowerCamelCase : Tuple=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __lowerCamelCase : List[str]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=128 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : int=True , __lowerCamelCase : Tuple=0.0_5 , __lowerCamelCase : str=10 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : List[str]=10 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : List[Any]="mean" , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Dict=False , __lowerCamelCase : Any=256 , __lowerCamelCase : Union[str, Any]=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : Union[str, Any]=2 , **__lowerCamelCase : List[Any] , ): '''simple docstring''' super().__init__(**__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase ) lowerCamelCase__ : Any = hidden_size lowerCamelCase__ : Union[str, Any] = feat_extract_norm lowerCamelCase__ : List[str] = feat_extract_activation lowerCamelCase__ : Any = list(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = list(__lowerCamelCase ) lowerCamelCase__ : int = list(__lowerCamelCase ) lowerCamelCase__ : List[str] = conv_bias lowerCamelCase__ : Union[str, Any] = num_conv_pos_embeddings lowerCamelCase__ : int = num_conv_pos_embedding_groups lowerCamelCase__ : Union[str, Any] = len(self.conv_dim ) lowerCamelCase__ : Optional[Any] = num_hidden_layers lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : Optional[int] = squeeze_factor lowerCamelCase__ : List[str] = max_position_embeddings lowerCamelCase__ : List[str] = position_buckets lowerCamelCase__ : List[str] = share_att_key lowerCamelCase__ : int = relative_attention lowerCamelCase__ : Dict = norm_rel_ebd lowerCamelCase__ : Optional[int] = list(__lowerCamelCase ) lowerCamelCase__ : Any = hidden_act lowerCamelCase__ : List[str] = num_attention_heads lowerCamelCase__ : Any = hidden_dropout lowerCamelCase__ : List[Any] = attention_dropout lowerCamelCase__ : Any = activation_dropout lowerCamelCase__ : List[str] = feat_proj_dropout lowerCamelCase__ : int = 
final_dropout lowerCamelCase__ : Tuple = layer_norm_eps lowerCamelCase__ : List[Any] = feature_layer_norm_eps lowerCamelCase__ : List[str] = initializer_range lowerCamelCase__ : List[str] = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)" f"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCamelCase__ : List[str] = apply_spec_augment lowerCamelCase__ : Optional[int] = mask_time_prob lowerCamelCase__ : Optional[Any] = mask_time_length lowerCamelCase__ : Union[str, Any] = mask_time_min_masks lowerCamelCase__ : List[Any] = mask_feature_prob lowerCamelCase__ : Optional[Any] = mask_feature_length lowerCamelCase__ : Any = mask_feature_min_masks # ctc loss lowerCamelCase__ : Dict = ctc_loss_reduction lowerCamelCase__ : List[Any] = ctc_zero_infinity # sequence classification lowerCamelCase__ : Optional[Any] = use_weighted_layer_sum lowerCamelCase__ : Union[str, Any] = classifier_proj_size @property def lowerCAmelCase ( self : Any ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
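This file mirrors the SEW-D configuration class from transformers (the obfuscated `_lowercase` above). A minimal sketch of typical use, assuming the upstream names `SEWDConfig` and `inputs_to_logits_ratio`:

from transformers import SEWDConfig  # assumed upstream equivalent of the class above

config = SEWDConfig(hidden_size=768, num_hidden_layers=12)
# The closing property multiplies the conv strides together: with the default
# strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) that is 5 * 2**6 = 320,
# i.e. one output frame per 320 input samples.
print(config.inputs_to_logits_ratio)  # 320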
5
"""
Harris Corner Detector
https://en.wikipedia.org/wiki/Harris_Corner_Detector
"""

import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is an empirically determined constant in [0.04, 0.06]
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
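A self-contained sketch of the per-window response computed inside the double loop above (`harris_response` is an illustrative helper name; `wxx`, `wyy`, `wxy` are the window sums from the code):

def harris_response(wxx: float, wyy: float, wxy: float, k: float = 0.04) -> float:
    # Structure tensor M = [[wxx, wxy], [wxy, wyy]];
    # corner response R = det(M) - k * trace(M)**2
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2

print(harris_response(100.0, 100.0, 0.0))  # 8400.0: strong gradients both ways -> corner
print(harris_response(100.0, 0.0, 0.0))    # -400.0: gradient one way only -> edge, rejected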
5
1
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backwards compatibility, forward everything to the current processor
        # when used inside the (deprecated) target context manager.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
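A sketch of the call patterns `__call__` above supports (the checkpoint name is a real public one, used here purely for illustration):

import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
waveform = np.zeros(16_000, dtype=np.float32)  # one second of placeholder audio

# Audio alone is routed to the feature extractor.
inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")

# Audio plus text: the tokenized text is attached to the output as labels,
# matching the encodings["input_ids"] assignment at the end of __call__.
batch = processor(audio=waveform, sampling_rate=16_000, text="a transcript", return_tensors="pt")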
5
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _lowercase : """simple docstring""" def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=13 , __lowerCamelCase : Dict=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Optional[Any]=36 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Union[str, Any]=6 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Dict=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[Any]=512 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Dict=None , ): '''simple docstring''' lowerCamelCase__ : Dict = parent lowerCamelCase__ : List[Any] = batch_size lowerCamelCase__ : Any = seq_length lowerCamelCase__ : List[str] = is_training lowerCamelCase__ : int = use_input_mask lowerCamelCase__ : List[str] = use_token_type_ids lowerCamelCase__ : int = use_labels lowerCamelCase__ : Dict = vocab_size lowerCamelCase__ : List[Any] = embedding_size lowerCamelCase__ : Dict = hidden_size lowerCamelCase__ : Any = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_hidden_groups lowerCamelCase__ : Optional[int] = num_attention_heads lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob lowerCamelCase__ : Optional[int] = max_position_embeddings lowerCamelCase__ : List[Any] = type_vocab_size lowerCamelCase__ : Optional[Any] = type_sequence_label_size lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : str = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Any = scope def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[int] = None if self.use_input_mask: lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Tuple = None lowerCamelCase__ : List[str] = None lowerCamelCase__ : int = None if self.use_labels: lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , 
self.num_labels ) lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : str ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : int = AlbertModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Any = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Any = AlbertForPreTraining(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Union[str, Any] = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , sentence_order_label=__lowerCamelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Dict = AlbertForMaskedLM(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : int ): '''simple docstring''' lowerCamelCase__ : str = AlbertForQuestionAnswering(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : str = model( __lowerCamelCase , 
attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ): '''simple docstring''' lowerCamelCase__ : int = self.num_labels lowerCamelCase__ : Optional[int] = AlbertForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.num_labels lowerCamelCase__ : List[str] = AlbertForTokenClassification(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.num_choices lowerCamelCase__ : Optional[int] = AlbertForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : int = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : int = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Union[str, Any] = config_and_inputs lowerCamelCase__ : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) A__ = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, 
"question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) A__ = True def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : Any = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class in get_values(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase ) lowerCamelCase__ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = AlbertModelTester(self ) lowerCamelCase__ : Optional[Any] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase__ : Dict = type self.model_tester.create_and_check_model(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : List[str] = AlbertModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = AlbertModel.from_pretrained("albert-base-v2" ) lowerCamelCase__ : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowerCamelCase__ : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCamelCase__ : List[Any] = model(__lowerCamelCase , 
attention_mask=__lowerCamelCase )[0] lowerCamelCase__ : Tuple = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) lowerCamelCase__ : Dict = torch.tensor( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1E-4 ) )
5
1
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset A : Tuple = "bert-base-cased" A : int = "google/pegasus-xsum" A : Optional[Any] = [" Sam ate lunch today.", "Sams lunch ingredients."] A : Any = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] A : int = "patrickvonplaten/t5-tiny-random" A : Optional[int] = "sshleifer/bart-tiny-random" A : Any = "sshleifer/tiny-mbart" A : Dict = "sshleifer/tiny-marian-en-de" def lowercase_ ( _A : Path , _A : list ): """simple docstring""" lowerCamelCase__ : Optional[Any] = "\n".join(_A ) Path(_A ).open("w" ).writelines(_A ) def lowercase_ ( _A : Union[str, Any] ): """simple docstring""" for split in ["train", "val", "test"]: _dump_articles(os.path.join(_A , F"{split}.source" ) , _A ) _dump_articles(os.path.join(_A , F"{split}.target" ) , _A ) return tmp_dir class _lowercase ( lowercase__): """simple docstring""" @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : int ): '''simple docstring''' lowerCamelCase__ : List[str] = AutoTokenizer.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : str = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCamelCase__ : List[Any] = max(len(tokenizer.encode(__lowerCamelCase ) ) for a in ARTICLES ) lowerCamelCase__ : List[Any] = max(len(tokenizer.encode(__lowerCamelCase ) ) for a in SUMMARIES ) lowerCamelCase__ : Tuple = 4 lowerCamelCase__ : List[str] = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowerCamelCase__ , lowerCamelCase__ : int = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. lowerCamelCase__ : Optional[int] = SeqaSeqDataset( __lowerCamelCase , data_dir=__lowerCamelCase , type_path="train" , max_source_length=__lowerCamelCase , max_target_length=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , ) lowerCamelCase__ : List[Any] = DataLoader(__lowerCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(__lowerCamelCase , __lowerCamelCase ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowerCamelCase__ : List[str] = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Dict = AutoTokenizer.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : str = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCamelCase__ : Tuple = max(len(tokenizer.encode(__lowerCamelCase ) ) for a in ARTICLES ) lowerCamelCase__ : Any = max(len(tokenizer.encode(__lowerCamelCase ) ) for a in SUMMARIES ) lowerCamelCase__ : Optional[Any] = 4 lowerCamelCase__ : int = LegacySeqaSeqDataset( __lowerCamelCase , data_dir=__lowerCamelCase , type_path="train" , max_source_length=20 , max_target_length=__lowerCamelCase , ) lowerCamelCase__ : Tuple = DataLoader(__lowerCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : str = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) lowerCamelCase__ : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowerCamelCase__ : int = tmp_dir.joinpath("train.source" ).open().readlines() lowerCamelCase__ : str = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(__lowerCamelCase , __lowerCamelCase , 128 , __lowerCamelCase ) lowerCamelCase__ : Optional[int] = {x.name for x in tmp_dir.iterdir()} lowerCamelCase__ : List[Any] = {x.name for x in save_dir.iterdir()} lowerCamelCase__ : Union[str, Any] = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(__lowerCamelCase ) < len(__lowerCamelCase ) assert len(__lowerCamelCase ) == 1 assert len(packed_examples[0] ) == sum(len(__lowerCamelCase ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' if not FAIRSEQ_AVAILABLE: return lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = self._get_dataset(max_len=64 ) lowerCamelCase__ : int = 64 lowerCamelCase__ : int = ds.make_dynamic_sampler(__lowerCamelCase , required_batch_size_multiple=__lowerCamelCase ) lowerCamelCase__ : int = [len(__lowerCamelCase ) for x in batch_sampler] assert len(set(__lowerCamelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length assert 
sum(__lowerCamelCase ) == len(__lowerCamelCase ) # no dropped or added examples lowerCamelCase__ : int = DataLoader(__lowerCamelCase , batch_sampler=__lowerCamelCase , collate_fn=ds.collate_fn , num_workers=2 ) lowerCamelCase__ : Dict = [] lowerCamelCase__ : Optional[int] = [] for batch in data_loader: lowerCamelCase__ : int = batch["input_ids"].shape lowerCamelCase__ : Dict = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowerCamelCase__ : Any = np.product(batch["input_ids"].shape ) num_src_per_batch.append(__lowerCamelCase ) if num_src_tokens > (max_tokens * 1.1): failures.append(__lowerCamelCase ) assert num_src_per_batch[0] == max(__lowerCamelCase ) if failures: raise AssertionError(f"too many tokens in {len(__lowerCamelCase )} batches" ) def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = self._get_dataset(max_len=512 ) lowerCamelCase__ : Optional[Any] = 2 lowerCamelCase__ : Any = ds.make_sortish_sampler(__lowerCamelCase , shuffle=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = DataLoader(__lowerCamelCase , batch_size=__lowerCamelCase , collate_fn=ds.collate_fn , num_workers=2 ) lowerCamelCase__ : List[Any] = DataLoader(__lowerCamelCase , batch_size=__lowerCamelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=__lowerCamelCase ) lowerCamelCase__ : Dict = tokenizer.pad_token_id def count_pad_tokens(__lowerCamelCase : int , __lowerCamelCase : List[str]="input_ids" ): return [batch[k].eq(__lowerCamelCase ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(__lowerCamelCase , k="labels" ) ) < sum(count_pad_tokens(__lowerCamelCase , k="labels" ) ) assert sum(count_pad_tokens(__lowerCamelCase ) ) < sum(count_pad_tokens(__lowerCamelCase ) ) assert len(__lowerCamelCase ) == len(__lowerCamelCase ) def lowerCAmelCase ( self : str , __lowerCamelCase : Union[str, Any]=1000 , __lowerCamelCase : Union[str, Any]=128 ): '''simple docstring''' if os.getenv("USE_REAL_DATA" , __lowerCamelCase ): lowerCamelCase__ : Optional[int] = "examples/seq2seq/wmt_en_ro" lowerCamelCase__ : Optional[int] = max_len * 2 * 64 if not Path(__lowerCamelCase ).joinpath("train.len" ).exists(): save_len_file(__lowerCamelCase , __lowerCamelCase ) else: lowerCamelCase__ : Dict = "examples/seq2seq/test_data/wmt_en_ro" lowerCamelCase__ : List[Any] = max_len * 4 save_len_file(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : str = AutoTokenizer.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : int = SeqaSeqDataset( __lowerCamelCase , data_dir=__lowerCamelCase , type_path="train" , max_source_length=__lowerCamelCase , max_target_length=__lowerCamelCase , n_obs=__lowerCamelCase , ) return ds, max_tokens, tokenizer def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self._get_dataset() lowerCamelCase__ : str = set(DistributedSortishSampler(__lowerCamelCase , 256 , num_replicas=2 , rank=0 , add_extra_examples=__lowerCamelCase ) ) lowerCamelCase__ : Tuple = set(DistributedSortishSampler(__lowerCamelCase , 256 , num_replicas=2 , rank=1 , add_extra_examples=__lowerCamelCase ) ) assert idsa.intersection(__lowerCamelCase ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def lowerCAmelCase ( self : int , __lowerCamelCase : str ): '''simple docstring''' lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , 
use_fast=__lowerCamelCase ) if tok_name == MBART_TINY: lowerCamelCase__ : Tuple = SeqaSeqDataset( __lowerCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) lowerCamelCase__ : List[Any] = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowerCamelCase__ : List[str] = SeqaSeqDataset( __lowerCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) lowerCamelCase__ : Optional[Any] = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(__lowerCamelCase ) == 1 if tok_name == BART_TINY else len(__lowerCamelCase ) == 0
5
import os


def solution(filename: str = "input.txt") -> int:
    with open(os.path.join(os.path.dirname(filename), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    columns = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(columns)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, columns):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f'{solution() = }')
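The same left-to-right dynamic programme, restated on an in-memory matrix with a worked result (`min_path_sum_left_to_right` is an illustrative name; the function above reads its matrix from input.txt):

def min_path_sum_left_to_right(matrix: list[list[int]]) -> int:
    rows, columns = len(matrix), len(matrix[0])
    dp = [row[0] for row in matrix]        # best cost to reach each cell of column 0
    for j in range(1, columns):
        dp = [dp[i] + matrix[i][j] for i in range(rows)]  # enter column j from the left
        for i in range(1, rows):                          # relax downward moves
            dp[i] = min(dp[i], dp[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):                 # relax upward moves
            dp[i] = min(dp[i], dp[i + 1] + matrix[i][j])
    return min(dp)

print(min_path_sum_left_to_right([[1, 9, 1], [5, 1, 1], [9, 9, 9]]))  # 7, via 5 -> 1 -> 1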
5
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging A : List[str] = logging.get_logger(__name__) A : Optional[int] = { "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json", # See all SEW models at https://huggingface.co/models?filter=sew } class _lowercase ( lowercase__): """simple docstring""" A__ = "sew" def __init__( self : Optional[Any] , __lowerCamelCase : int=32 , __lowerCamelCase : Dict=768 , __lowerCamelCase : Any=12 , __lowerCamelCase : int=12 , __lowerCamelCase : Dict=3072 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : str=0.0 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : List[Any]=0.0_2 , __lowerCamelCase : List[str]=1E-5 , __lowerCamelCase : Optional[int]="group" , __lowerCamelCase : int="gelu" , __lowerCamelCase : Any=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __lowerCamelCase : Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __lowerCamelCase : Any=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __lowerCamelCase : Tuple=False , __lowerCamelCase : Union[str, Any]=128 , __lowerCamelCase : Dict=16 , __lowerCamelCase : int=True , __lowerCamelCase : Any=0.0_5 , __lowerCamelCase : int=10 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : str=10 , __lowerCamelCase : List[str]=0 , __lowerCamelCase : Dict="mean" , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : Dict=False , __lowerCamelCase : Optional[Any]=256 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : Dict=1 , __lowerCamelCase : Any=2 , **__lowerCamelCase : int , ): '''simple docstring''' super().__init__(**__lowerCamelCase , pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase ) lowerCamelCase__ : Dict = hidden_size lowerCamelCase__ : List[str] = feat_extract_norm lowerCamelCase__ : Dict = feat_extract_activation lowerCamelCase__ : List[Any] = list(__lowerCamelCase ) lowerCamelCase__ : Dict = list(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = list(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = conv_bias lowerCamelCase__ : List[str] = num_conv_pos_embeddings lowerCamelCase__ : Tuple = num_conv_pos_embedding_groups lowerCamelCase__ : Optional[Any] = len(self.conv_dim ) lowerCamelCase__ : Any = num_hidden_layers lowerCamelCase__ : str = intermediate_size lowerCamelCase__ : int = squeeze_factor lowerCamelCase__ : str = hidden_act lowerCamelCase__ : List[str] = num_attention_heads lowerCamelCase__ : Any = hidden_dropout lowerCamelCase__ : Tuple = attention_dropout lowerCamelCase__ : Tuple = activation_dropout lowerCamelCase__ : Tuple = feat_proj_dropout lowerCamelCase__ : str = final_dropout lowerCamelCase__ : int = layerdrop lowerCamelCase__ : Union[str, Any] = layer_norm_eps lowerCamelCase__ : Union[str, Any] = initializer_range lowerCamelCase__ : int = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." 
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)" f"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCamelCase__ : Tuple = apply_spec_augment lowerCamelCase__ : Optional[Any] = mask_time_prob lowerCamelCase__ : Tuple = mask_time_length lowerCamelCase__ : List[Any] = mask_time_min_masks lowerCamelCase__ : List[Any] = mask_feature_prob lowerCamelCase__ : Any = mask_feature_length lowerCamelCase__ : Any = mask_feature_min_masks # ctc loss lowerCamelCase__ : List[str] = ctc_loss_reduction lowerCamelCase__ : Dict = ctc_zero_infinity # sequence classification lowerCamelCase__ : Tuple = use_weighted_layer_sum lowerCamelCase__ : Optional[Any] = classifier_proj_size @property def lowerCAmelCase ( self : Dict ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
5
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py A : Tuple = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n" A : Optional[int] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n" A : str = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _lowercase ( datasets.Metric): """simple docstring""" def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[ "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : str = compute_bleu( reference_corpus=__lowerCamelCase , translation_corpus=__lowerCamelCase , max_order=__lowerCamelCase , smooth=__lowerCamelCase ) ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : List[str] = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
5
1
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class _lowercase ( unittest.TestCase): """simple docstring""" A__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def lowerCAmelCase ( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Any = hf_hub_download( repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" ) lowerCamelCase__ : Union[str, Any] = VideoClassificationPipeline(model=__lowerCamelCase , image_processor=__lowerCamelCase , top_k=2 ) lowerCamelCase__ : List[Any] = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def lowerCAmelCase ( self : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ): '''simple docstring''' for example in examples: lowerCamelCase__ : Dict = video_classifier(__lowerCamelCase ) self.assertEqual( __lowerCamelCase , [ {"score": ANY(__lowerCamelCase ), "label": ANY(__lowerCamelCase )}, {"score": ANY(__lowerCamelCase ), "label": ANY(__lowerCamelCase )}, ] , ) @require_torch def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" lowerCamelCase__ : List[str] = VideoMAEFeatureExtractor( size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} ) lowerCamelCase__ : str = pipeline( "video-classification" , model=__lowerCamelCase , feature_extractor=__lowerCamelCase , frame_sampling_rate=4 ) lowerCamelCase__ : List[Any] = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" ) lowerCamelCase__ : Union[str, Any] = video_classifier(__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}] , ) lowerCamelCase__ : Any = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}], [{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}], ] , ) @require_tf def lowerCAmelCase ( self : Dict ): '''simple docstring''' pass
5
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f'https://google.com{link.get("href")}')
5
1
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
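A short usage sketch (the import path datasets.tasks and the upstream class name are assumed; the column names are illustrative):

from datasets.tasks import Summarization

task = Summarization(text_column="article", summary_column="highlights")
print(task.column_mapping)  # {'article': 'text', 'highlights': 'summary'}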
5
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : Dict = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" ) lowerCamelCase__ : str = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" lowerCamelCase__ : Any = model(__lowerCamelCase )["last_hidden_state"] lowerCamelCase__ : List[str] = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice. lowerCamelCase__ : str = tf.convert_to_tensor( [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
5
1
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
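A quick check of the recursion above:

nums = [3, -7, 12, 5, 9, 0]
print(find_max(nums, 0, len(nums) - 1))  # 12
print(find_max(nums, 3, 5))              # 9, the max of [5, 9, 0]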
5
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging A : List[Any] = logging.get_logger(__name__) A : Any = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class _lowercase ( lowercase__): """simple docstring""" A__ = "blenderbot-small" A__ = ["past_key_values"] A__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Dict , __lowerCamelCase : List[str]=50265 , __lowerCamelCase : str=512 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : str=2048 , __lowerCamelCase : str=16 , __lowerCamelCase : List[Any]=8 , __lowerCamelCase : Any=2048 , __lowerCamelCase : List[str]=16 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Dict=False , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=2 , **__lowerCamelCase : int , ): '''simple docstring''' lowerCamelCase__ : str = vocab_size lowerCamelCase__ : Union[str, Any] = max_position_embeddings lowerCamelCase__ : Union[str, Any] = d_model lowerCamelCase__ : Optional[int] = encoder_ffn_dim lowerCamelCase__ : Dict = encoder_layers lowerCamelCase__ : Any = encoder_attention_heads lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim lowerCamelCase__ : str = decoder_layers lowerCamelCase__ : Optional[Any] = decoder_attention_heads lowerCamelCase__ : List[str] = dropout lowerCamelCase__ : List[Any] = attention_dropout lowerCamelCase__ : Dict = activation_dropout lowerCamelCase__ : Optional[Any] = activation_function lowerCamelCase__ : Dict = init_std lowerCamelCase__ : List[str] = encoder_layerdrop lowerCamelCase__ : Dict = decoder_layerdrop lowerCamelCase__ : int = use_cache lowerCamelCase__ : List[Any] = encoder_layers lowerCamelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , ) class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : int = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ : Union[str, Any] = {0: "batch"} lowerCamelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowerCamelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"} lowerCamelCase__ : str = {0: "batch", 1: "decoder_sequence"} if self.use_past: 
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. lowerCamelCase__ : Tuple = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"} else: lowerCamelCase__ : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Union[str, Any] = super().outputs else: lowerCamelCase__ : int = super(__lowerCamelCase , self ).outputs if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Any = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowerCAmelCase ( self : int , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Generate decoder inputs lowerCamelCase__ : List[str] = seq_length if not self.use_past else 1 lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} lowerCamelCase__ : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : Tuple = common_inputs["input_ids"].shape lowerCamelCase__ : int = common_inputs["decoder_input_ids"].shape[1] lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.num_attention_heads lowerCamelCase__ : str = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Optional[int] = decoder_seq_length + 3 lowerCamelCase__ : Dict = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCamelCase__ : List[Any] = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Optional[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers lowerCamelCase__ : Union[str, Any] = min(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers lowerCamelCase__ : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), ) ) # TODO: test this. lowerCamelCase__ : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__lowerCamelCase , __lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) ) return common_inputs def lowerCAmelCase ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : int = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCamelCase__ : str = seqlen + 2 lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.num_layers lowerCamelCase__ , lowerCamelCase__ : int = self.num_attention_heads lowerCamelCase__ : Tuple = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Union[str, Any] = common_inputs["attention_mask"].dtype lowerCamelCase__ : List[str] = torch.cat( [common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Tuple = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase ) ] return common_inputs def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(__lowerCamelCase ) lowerCamelCase__ : Dict = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence lowerCamelCase__ : Optional[int] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCamelCase__ : Optional[Any] = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) elif self.task == "causal-lm": lowerCamelCase__ : Any = self._generate_dummy_inputs_for_causal_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) else: lowerCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Dict = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) else: lowerCamelCase__ : int = super(__lowerCamelCase , self )._flatten_past_key_values_( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
5
1
import warnings

from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401


warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
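# For reference, the replacement the warning points to — a minimal sketch, with
# an illustrative checkpoint id that is not taken from this file:
#
#   from diffusers import StableDiffusionInpaintPipeline
#   pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
#   result = pipe(prompt="a cat", image=init_image, mask_image=mask).images[0]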
5
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
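# For reference, what the `inputs` property above resolves to (restated as a
# comment; this describes existing behavior, it adds none):
#   task == "multiple-choice" -> {"input_ids": {0: "batch", 1: "choice", 2: "sequence"},
#                                 "attention_mask": {0: "batch", 1: "choice", 2: "sequence"}}
#   any other task            -> {"input_ids": {0: "batch", 1: "sequence"},
#                                 "attention_mask": {0: "batch", 1: "sequence"}}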
5
1
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = KandinskyVaaControlnetImgaImgPipeline A__ = ["image_embeds", "negative_image_embeds", "image", "hint"] A__ = ["image_embeds", "negative_image_embeds", "image", "hint"] A__ = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] A__ = False @property def lowerCAmelCase ( self : str ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : int ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return self.time_input_dim @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCAmelCase ( self : Any ): '''simple docstring''' return 100 @property def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : Any = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } lowerCamelCase__ : str = UNetaDConditionModel(**__lowerCamelCase ) return model @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : Any = VQModel(**self.dummy_movq_kwargs ) return model def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : Dict = self.dummy_unet lowerCamelCase__ : Union[str, Any] = self.dummy_movq lowerCamelCase__ : Optional[Any] = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_0_0_8_5, "beta_end": 0.0_1_2, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } lowerCamelCase__ : List[Any] = DDIMScheduler(**__lowerCamelCase ) lowerCamelCase__ : Dict = { "unet": unet, "scheduler": scheduler, "movq": 
movq, } return components def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any]=0 ): '''simple docstring''' lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowerCamelCase ) # create init_image lowerCamelCase__ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) ) # create hint lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) if str(__lowerCamelCase ).startswith("mps" ): lowerCamelCase__ : List[str] = torch.manual_seed(__lowerCamelCase ) else: lowerCamelCase__ : str = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) lowerCamelCase__ : str = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : Any = "cpu" lowerCamelCase__ : Any = self.get_dummy_components() lowerCamelCase__ : Union[str, Any] = self.pipeline_class(**__lowerCamelCase ) lowerCamelCase__ : str = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : str = pipe(**self.get_dummy_inputs(__lowerCamelCase ) ) lowerCamelCase__ : List[str] = output.images lowerCamelCase__ : int = pipe( **self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0] lowerCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1] lowerCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase__ : Any = np.array( [0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" ) lowerCamelCase__ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) lowerCamelCase__ : List[str] = init_image.resize((512, 512) ) lowerCamelCase__ : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png" ) lowerCamelCase__ : Optional[Any] = 
torch.from_numpy(np.array(__lowerCamelCase ) ).float() / 2_5_5.0 lowerCamelCase__ : Optional[Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) lowerCamelCase__ : Optional[Any] = "A robot, 4k photo" lowerCamelCase__ : str = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(__lowerCamelCase ) lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa ) lowerCamelCase__ : List[Any] = pipeline.to(__lowerCamelCase ) pipeline.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : str = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase__ , lowerCamelCase__ : List[str] = pipe_prior( __lowerCamelCase , image=__lowerCamelCase , strength=0.8_5 , generator=__lowerCamelCase , negative_prompt="" , ).to_tuple() lowerCamelCase__ : Dict = pipeline( image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , hint=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , ) lowerCamelCase__ : List[Any] = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
5
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _lowercase : """simple docstring""" def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=0.0_2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]=None , ): '''simple docstring''' lowerCamelCase__ : Tuple = parent lowerCamelCase__ : int = batch_size lowerCamelCase__ : List[Any] = seq_length lowerCamelCase__ : Union[str, Any] = is_training lowerCamelCase__ : Any = use_token_type_ids lowerCamelCase__ : Union[str, Any] = use_labels lowerCamelCase__ : List[str] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : List[Any] = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : str = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : List[str] = max_position_embeddings lowerCamelCase__ : Optional[int] = type_vocab_size lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : List[str] = initializer_range lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Optional[Any] = scope lowerCamelCase__ : List[Any] = self.vocab_size - 1 def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Any = None lowerCamelCase__ : str = None lowerCamelCase__ : str = None if self.use_labels: lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCamelCase__ : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, 
sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , *__lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = OpenAIGPTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : str = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTLMHeadModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTDoubleHeadsModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Optional[Any] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Dict = self.num_labels lowerCamelCase__ : Tuple = OpenAIGPTForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : str = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Any = config_and_inputs lowerCamelCase__ : Union[str, Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) A__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly A__ = ( { "feature-extraction": OpenAIGPTModel, 
"text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ): '''simple docstring''' lowerCamelCase__ : Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCamelCase__ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Tuple = inputs_dict["labels"] lowerCamelCase__ : Any = inputs_dict["labels"] lowerCamelCase__ : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 ) def lowerCAmelCase ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*__lowerCamelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Any = OpenAIGPTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(__lowerCamelCase ) lowerCamelCase__ : int = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__lowerCamelCase ) # the president is lowerCamelCase__ : Union[str, Any] = [ 481, 4735, 544, 246, 963, 870, 762, 
239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowerCamelCase__ : int = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase ) self.assertListEqual(output_ids[0].tolist() , __lowerCamelCase )
5
1
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
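# Behavior sketch for the dummy object above (assumes note_seq is NOT
# installed): construction fails loudly at use time instead of the package
# import itself failing.
#
#   processor = MidiProcessor()  # raises ImportError mentioning `note_seq`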
5
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
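# Minimal usage sketch (not part of the original module): build a config with
# integer quantization enabled; `force_dequant` selects a nonlinearity to force
# back to floating point.
config = IBertConfig(quant_mode=True, force_dequant="gelu")
assert config.model_type == "ibert" and config.quant_mode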
5
1
def solution() -> int:
    """Return the product of the digits d_1 * d_10 * d_100 * d_1000 * d_10000
    * d_100000 * d_1000000 of Champernowne's constant 0.123456789101112..."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
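# Hedged cross-check (not in the original solution): the constant starts
# "123456789101112...", so the 1st and 10th sampled digits are both 1.
digits = "".join(str(n) for n in range(1, 100))
assert digits[0] == "1" and digits[9] == "1"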
5
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
5
1
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find an x in [a, b] with function(x) == 0 using the bisection method."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
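# Usage sketch with an illustrative function (not from the original file): the
# positive root of x**2 - 4 on [0, 1000] is 2, well within the 1e-7 stop rule.
root = bisection(lambda x: x**2 - 4, 0, 1000)
assert abs(root - 2) < 1e-6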
5
import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features A : Union[str, Any] = logging.get_logger(__name__) A : Union[str, Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) A : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _lowercase : """simple docstring""" A__ = field( default=lowercase__ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase__)}) A__ = field( default=lowercase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}) A__ = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ = field( default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) A__ = field( default=64 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) A__ = field( default=30 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) } , ) A__ = field( default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"}) A__ = field( default=lowercase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}) A__ = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"}) class _lowercase ( lowercase__): """simple docstring""" A__ = "train" A__ = "dev" class _lowercase ( lowercase__): """simple docstring""" A__ = 42 A__ = 42 A__ = 42 A__ = 42 def __init__( self : Optional[int] , __lowerCamelCase : SquadDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "pt" , ): '''simple docstring''' lowerCamelCase__ : List[str] = args lowerCamelCase__ : Tuple = is_language_sensitive lowerCamelCase__ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__lowerCamelCase , __lowerCamelCase ): try: lowerCamelCase__ : List[str] = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) lowerCamelCase__ : str = mode # Load data features from cache or dataset file lowerCamelCase__ : Any = "v2" if args.version_2_with_negative else "v1" lowerCamelCase__ : List[str] = os.path.join( cache_dir if 
cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCamelCase__ : List[str] = cached_features_file + ".lock" with FileLock(__lowerCamelCase ): if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache: lowerCamelCase__ : str = time.time() lowerCamelCase__ : Tuple = torch.load(__lowerCamelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowerCamelCase__ : Optional[Any] = self.old_features["features"] lowerCamelCase__ : Optional[int] = self.old_features.get("dataset" , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = self.old_features.get("examples" , __lowerCamelCase ) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in" " future run" ) else: if mode == Split.dev: lowerCamelCase__ : List[Any] = self.processor.get_dev_examples(args.data_dir ) else: lowerCamelCase__ : str = self.processor.get_train_examples(args.data_dir ) lowerCamelCase__ , lowerCamelCase__ : Tuple = squad_convert_examples_to_features( examples=self.examples , tokenizer=__lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCamelCase , ) lowerCamelCase__ : int = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__( self : List[Any] ): '''simple docstring''' return len(self.features ) def __getitem__( self : List[str] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = self.features[i] lowerCamelCase__ : Tuple = torch.tensor(feature.input_ids , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long ) lowerCamelCase__ : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.cls_index , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.p_mask , dtype=torch.float ) lowerCamelCase__ : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float ) lowerCamelCase__ : List[str] = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask} ) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible} ) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowerCamelCase__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"start_positions": start_positions, "end_positions": end_positions} ) return inputs
5
1
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process A : int = logging.getLogger(__name__) def lowercase_ ( _A : Dict , _A : str ): """simple docstring""" return (preds == labels).mean() @dataclass class _lowercase : """simple docstring""" A__ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) A__ = field( default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"}) A__ = field( default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) A__ = field( default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class _lowercase : """simple docstring""" A__ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())}) A__ = field(metadata={"help": "Should contain the data files for the task."}) A__ = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ = field( default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"}) def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. Use" " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , _A ) # Set seed set_seed(training_args.seed ) try: lowerCamelCase__ : Dict = processors[data_args.task_name]() lowerCamelCase__ : Dict = processor.get_labels() lowerCamelCase__ : Optional[int] = len(_A ) except KeyError: raise ValueError("Task not found: %s" % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowerCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_A , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) lowerCamelCase__ : Any = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowerCamelCase__ : str = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , ) # Get datasets lowerCamelCase__ : int = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_A , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) lowerCamelCase__ : Tuple = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_A , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(_A : EvalPrediction ) -> Dict: lowerCamelCase__ : Optional[Any] = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(_A , p.label_ids )} # Data collator lowerCamelCase__ : Optional[int] = DataCollatorWithPadding(_A , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer lowerCamelCase__ : List[str] = Trainer( model=_A , args=_A , train_dataset=_A , eval_dataset=_A , compute_metrics=_A , data_collator=_A , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowerCamelCase__ : int = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) lowerCamelCase__ : str = trainer.evaluate() lowerCamelCase__ : List[Any] = os.path.join(training_args.output_dir , "eval_results.txt" ) if trainer.is_world_master(): with open(_A , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(" %s = %s" , _A , _A ) writer.write("%s = %s\n" % (key, value) ) results.update(_A ) return results def lowercase_ ( _A : Tuple ): """simple docstring""" main() if __name__ == "__main__": main()
5
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging A : Tuple = logging.get_logger(__name__) A : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all LED models at https://huggingface.co/models?filter=LED A : int = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } A : Union[str, Any] = { "allenai/led-base-16384": 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : str = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) lowerCamelCase__ : Any = bs[:] lowerCamelCase__ : Union[str, Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(_A ) cs.append(2**8 + n ) n += 1 lowerCamelCase__ : Any = [chr(_A ) for n in cs] return dict(zip(_A , _A ) ) def lowercase_ ( _A : Any ): """simple docstring""" lowerCamelCase__ : Union[str, Any] = set() lowerCamelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase__ : Any = char return pairs class _lowercase ( lowercase__): """simple docstring""" A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ["input_ids", "attention_mask"] def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token lowerCamelCase__ : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token lowerCamelCase__ : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token lowerCamelCase__ : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token lowerCamelCase__ : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: lowerCamelCase__ : Union[str, Any] = json.load(__lowerCamelCase ) lowerCamelCase__ : List[str] = {v: k for k, v in self.encoder.items()} lowerCamelCase__ : Union[str, Any] = errors # how to handle errors in decoding lowerCamelCase__ : List[Any] = bytes_to_unicode() lowerCamelCase__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: lowerCamelCase__ : List[Any] = merges_handle.read().split("\n" )[1:-1] lowerCamelCase__ : str = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase__ : Optional[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) lowerCamelCase__ : List[Any] = {} lowerCamelCase__ : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase__ : List[str] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return len(self.encoder ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Dict ): '''simple docstring''' if token in self.cache: return self.cache[token] lowerCamelCase__ : Union[str, Any] = tuple(__lowerCamelCase ) lowerCamelCase__ : Tuple = get_pairs(__lowerCamelCase ) if not pairs: return token while True: lowerCamelCase__ : str = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = bigram lowerCamelCase__ : int = [] lowerCamelCase__ : int = 0 while i < len(__lowerCamelCase ): try: lowerCamelCase__ : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase__ : List[str] = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase__ : Dict = tuple(__lowerCamelCase ) lowerCamelCase__ : str = new_word if len(__lowerCamelCase ) == 1: break else: lowerCamelCase__ : List[str] = get_pairs(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = " ".join(__lowerCamelCase ) lowerCamelCase__ : Dict = word return word def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = [] for token in re.findall(self.pat , __lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in 
self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def lowerCAmelCase ( self : Any , __lowerCamelCase : int ): '''simple docstring''' return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' return self.decoder.get(__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = "".join(__lowerCamelCase ) lowerCamelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCamelCase__ : List[Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase__ : Union[str, Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) lowerCamelCase__ : Tuple = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" 
) lowerCamelCase__ : List[Any] = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ : List[str] = [self.cls_token_id] lowerCamelCase__ : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowerCamelCase__ : Any = [self.sep_token_id] lowerCamelCase__ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict=False , **__lowerCamelCase : List[str] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): lowerCamelCase__ : Dict = " " + text return (text, kwargs) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ): '''simple docstring''' lowerCamelCase__ : str = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: lowerCamelCase__ : str = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCamelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCamelCase__ : Union[str, Any] = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: lowerCamelCase__ : Dict = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCamelCase__ : Optional[int] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowerCamelCase__ : Union[str, Any] = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
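# A hedged, self-contained sketch of the byte-to-unicode trick the LED tokenizer
# above relies on: every possible byte is mapped to a printable unicode character,
# so the BPE step never sees raw whitespace or control bytes. The helper name
# `demo_bytes_to_unicode` is hypothetical; it mirrors the logic of the copied
# `bytes_to_unicode` function rather than calling the transformers API.
def demo_bytes_to_unicode() -> dict:
    # printable latin-1 ranges keep their own codepoints
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("\xa1"), ord("\xac") + 1))
        + list(range(ord("\xae"), ord("\xff") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)  # shift everything else above the byte range
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))


# A space (byte 0x20) maps to "Ġ", which is why GPT-2/BART/LED vocabularies are
# full of "Ġ"-prefixed tokens marking word boundaries.
assert demo_bytes_to_unicode()[ord(" ")] == "\u0120"  # 'Ġ'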
5
1
from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class _lowercase ( lowercase__ , lowercase__): """simple docstring""" @register_to_config def __init__( self : List[Any] , __lowerCamelCase : int = 768 , ): '''simple docstring''' super().__init__() lowerCamelCase__ : List[Any] = nn.Parameter(torch.zeros(1 , __lowerCamelCase ) ) lowerCamelCase__ : int = nn.Parameter(torch.ones(1 , __lowerCamelCase ) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[Union[str, torch.device]] = None , __lowerCamelCase : Optional[torch.dtype] = None , ): '''simple docstring''' lowerCamelCase__ : Dict = nn.Parameter(self.mean.to(__lowerCamelCase ).to(__lowerCamelCase ) ) lowerCamelCase__ : Union[str, Any] = nn.Parameter(self.std.to(__lowerCamelCase ).to(__lowerCamelCase ) ) return self def lowerCAmelCase ( self : List[str] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : str = (embeds - self.mean) * 1.0 / self.std return embeds def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = (embeds * self.std) + self.mean return embeds
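# A minimal sketch of what the tiny module above does: it keeps a per-dimension
# mean and std and uses them to whiten embeddings on the way in and to undo the
# whitening on the way out. Plain tensors stand in for the nn.Parameter buffers;
# the point is that the scale and unscale paths are exact inverses.
import torch

embed_dim = 768  # matches the default embedding dimension above
mean = torch.zeros(1, embed_dim)
std = torch.ones(1, embed_dim)

embeds = torch.randn(2, embed_dim)
scaled = (embeds - mean) * 1.0 / std  # the "scale" path
restored = (scaled * std) + mean      # the "unscale" path
assert torch.allclose(embeds, restored)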
5
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = KandinskyVaaImgaImgPipeline A__ = ["image_embeds", "negative_image_embeds", "image"] A__ = [ "image_embeds", "negative_image_embeds", "image", ] A__ = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] A__ = False @property def lowerCAmelCase ( self : Dict ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return 100 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : Optional[Any] = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } lowerCamelCase__ : Tuple = UNetaDConditionModel(**__lowerCamelCase ) return model @property def lowerCAmelCase ( self : int ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : int = VQModel(**self.dummy_movq_kwargs ) return model def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.dummy_unet lowerCamelCase__ : Optional[Any] = self.dummy_movq lowerCamelCase__ : Optional[int] = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_0_0_8_5, "beta_end": 0.0_1_2, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } lowerCamelCase__ : List[Any] = DDIMScheduler(**__lowerCamelCase ) lowerCamelCase__ : Tuple = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , 
__lowerCamelCase : int=0 ): '''simple docstring''' lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowerCamelCase ) # create init_image lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ : Optional[int] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) ) if str(__lowerCamelCase ).startswith("mps" ): lowerCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase ) else: lowerCamelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) lowerCamelCase__ : Tuple = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Dict = "cpu" lowerCamelCase__ : str = self.get_dummy_components() lowerCamelCase__ : Optional[int] = self.pipeline_class(**__lowerCamelCase ) lowerCamelCase__ : List[str] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) ) lowerCamelCase__ : List[str] = output.images lowerCamelCase__ : Optional[Any] = pipe( **self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0] lowerCamelCase__ : int = image[0, -3:, -3:, -1] lowerCamelCase__ : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase__ : str = np.array( [0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy" ) lowerCamelCase__ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) lowerCamelCase__ : Any = "A red cartoon frog, 4k" lowerCamelCase__ : str = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(__lowerCamelCase ) lowerCamelCase__ : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa ) lowerCamelCase__ : str = pipeline.to(__lowerCamelCase ) pipeline.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Tuple = 
torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase__ , lowerCamelCase__ : List[str] = pipe_prior( __lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() lowerCamelCase__ : Optional[Any] = pipeline( image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , ) lowerCamelCase__ : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
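# The tests above seed randomness with a device-aware pattern worth isolating:
# torch.Generator objects cannot be created on "mps", so the seeded default CPU
# generator is used there instead. A hedged sketch (`make_generator` is a
# hypothetical helper, not part of diffusers):
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the seeded default CPU generator
    return torch.Generator(device=device).manual_seed(seed)

# Same seed, same device -> identical draws, which is what the slice assertions rely on.
assert torch.equal(
    torch.randn(3, generator=make_generator("cpu")),
    torch.randn(3, generator=make_generator("cpu")),
)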
5
1
import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion" ) A : List[str] = None A : Dict = { "7B": 11008, "13B": 13824, "30B": 17920, "65B": 22016, "70B": 28672, } A : Tuple = { "7B": 1, "7Bf": 1, "13B": 2, "13Bf": 2, "30B": 4, "65B": 8, "70B": 8, "70Bf": 8, } def lowercase_ ( _A : Union[str, Any] , _A : Optional[int]=1 , _A : List[Any]=256 ): """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def lowercase_ ( _A : Union[str, Any] ): """simple docstring""" with open(_A , "r" ) as f: return json.load(_A ) def lowercase_ ( _A : Tuple , _A : Union[str, Any] ): """simple docstring""" with open(_A , "w" ) as f: json.dump(_A , _A ) def lowercase_ ( _A : Tuple , _A : int , _A : Dict , _A : List[str]=True ): """simple docstring""" os.makedirs(_A , exist_ok=_A ) lowerCamelCase__ : Optional[Any] = os.path.join(_A , "tmp" ) os.makedirs(_A , exist_ok=_A ) lowerCamelCase__ : str = read_json(os.path.join(_A , "params.json" ) ) lowerCamelCase__ : int = NUM_SHARDS[model_size] lowerCamelCase__ : Optional[Any] = params["n_layers"] lowerCamelCase__ : List[Any] = params["n_heads"] lowerCamelCase__ : List[str] = n_heads // num_shards lowerCamelCase__ : Dict = params["dim"] lowerCamelCase__ : Dict = dim // n_heads lowerCamelCase__ : str = 10_000.0 lowerCamelCase__ : Tuple = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: lowerCamelCase__ : Dict = params["n_kv_heads"] # for GQA / MQA lowerCamelCase__ : str = n_heads_per_shard // num_key_value_heads lowerCamelCase__ : Optional[Any] = dim // num_key_value_heads else: # compatibility with other checkpoints lowerCamelCase__ : List[Any] = n_heads lowerCamelCase__ : Dict = n_heads_per_shard lowerCamelCase__ : List[str] = dim # permute for sliced rotary def permute(_A : int , _A : Union[str, Any]=n_heads , _A : Tuple=dim , _A : List[str]=dim ): return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A ) print(F"Fetching all parameters from the checkpoint at {input_base_path}." ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
lowerCamelCase__ : Any = torch.load(os.path.join(_A , "consolidated.00.pth" ) , map_location="cpu" ) else: # Sharded lowerCamelCase__ : str = [ torch.load(os.path.join(_A , F"consolidated.{i:02d}.pth" ) , map_location="cpu" ) for i in range(_A ) ] lowerCamelCase__ : Dict = 0 lowerCamelCase__ : Tuple = {"weight_map": {}} for layer_i in range(_A ): lowerCamelCase__ : Dict = F"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin" if model_size == "7B": # Unsharded lowerCamelCase__ : Optional[int] = { F"model.layers.{layer_i}.self_attn.q_proj.weight": permute( loaded[F"layers.{layer_i}.attention.wq.weight"] ), F"model.layers.{layer_i}.self_attn.k_proj.weight": permute( loaded[F"layers.{layer_i}.attention.wk.weight"] ), F"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[F"layers.{layer_i}.attention.wv.weight"], F"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[F"layers.{layer_i}.attention.wo.weight"], F"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w1.weight"], F"model.layers.{layer_i}.mlp.down_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w2.weight"], F"model.layers.{layer_i}.mlp.up_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w3.weight"], F"model.layers.{layer_i}.input_layernorm.weight": loaded[F"layers.{layer_i}.attention_norm.weight"], F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[F"layers.{layer_i}.ffn_norm.weight"], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. lowerCamelCase__ : Optional[Any] = { F"model.layers.{layer_i}.input_layernorm.weight": loaded[0][ F"layers.{layer_i}.attention_norm.weight" ].clone(), F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][ F"layers.{layer_i}.ffn_norm.weight" ].clone(), } lowerCamelCase__ : List[Any] = permute( torch.cat( [ loaded[i][F"layers.{layer_i}.attention.wq.weight"].view(_A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) ) lowerCamelCase__ : List[Any] = permute( torch.cat( [ loaded[i][F"layers.{layer_i}.attention.wk.weight"].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , ) lowerCamelCase__ : Dict = torch.cat( [ loaded[i][F"layers.{layer_i}.attention.wv.weight"].view( _A , _A , _A ) for i in range(_A ) ] , dim=0 , ).reshape(_A , _A ) lowerCamelCase__ : Union[str, Any] = torch.cat( [loaded[i][F"layers.{layer_i}.attention.wo.weight"] for i in range(_A )] , dim=1 ) lowerCamelCase__ : Tuple = torch.cat( [loaded[i][F"layers.{layer_i}.feed_forward.w1.weight"] for i in range(_A )] , dim=0 ) lowerCamelCase__ : Optional[Any] = torch.cat( [loaded[i][F"layers.{layer_i}.feed_forward.w2.weight"] for i in range(_A )] , dim=1 ) lowerCamelCase__ : Any = torch.cat( [loaded[i][F"layers.{layer_i}.feed_forward.w3.weight"] for i in range(_A )] , dim=0 ) lowerCamelCase__ : Optional[Any] = inv_freq for k, v in state_dict.items(): lowerCamelCase__ : Optional[int] = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) lowerCamelCase__ : Tuple = F"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin" if model_size == "7B": # Unsharded lowerCamelCase__ : Union[str, Any] = { "model.embed_tokens.weight": loaded["tok_embeddings.weight"], "model.norm.weight": loaded["norm.weight"], "lm_head.weight": loaded["output.weight"], 
} else: lowerCamelCase__ : Optional[Any] = { "model.norm.weight": loaded[0]["norm.weight"], "model.embed_tokens.weight": torch.cat( [loaded[i]["tok_embeddings.weight"] for i in range(_A )] , dim=1 ), "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(_A )] , dim=0 ), } for k, v in state_dict.items(): lowerCamelCase__ : Dict = filename param_count += v.numel() torch.save(_A , os.path.join(_A , _A ) ) # Write configs lowerCamelCase__ : Optional[int] = {"total_size": param_count * 2} write_json(_A , os.path.join(_A , "pytorch_model.bin.index.json" ) ) lowerCamelCase__ : List[Any] = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1 lowerCamelCase__ : Union[str, Any] = params["multiple_of"] if "multiple_of" in params else 256 lowerCamelCase__ : Optional[int] = LlamaConfig( hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params["n_heads"] , num_hidden_layers=params["n_layers"] , rms_norm_eps=params["norm_eps"] , num_key_value_heads=_A , ) config.save_pretrained(_A ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print("Loading the checkpoint in a Llama model." ) lowerCamelCase__ : List[Any] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A ) # Avoid saving this as part of the config. del model.config._name_or_path print("Saving in the Transformers format." ) model.save_pretrained(_A , safe_serialization=_A ) shutil.rmtree(_A ) def lowercase_ ( _A : Any , _A : Optional[int] ): """simple docstring""" lowerCamelCase__ : List[str] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(F"Saving a {tokenizer_class.__name__} to {tokenizer_path}." ) lowerCamelCase__ : str = tokenizer_class(_A ) tokenizer.save_pretrained(_A ) def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : Dict = argparse.ArgumentParser() parser.add_argument( "--input_dir" , help="Location of LLaMA weights, which contains tokenizer.model and model folders" , ) parser.add_argument( "--model_size" , choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"] , ) parser.add_argument( "--output_dir" , help="Location to write HF model and tokenizer" , ) parser.add_argument("--safe_serialization" , type=_A , help="Whether or not to save using `safetensors`." ) lowerCamelCase__ : List[str] = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) lowerCamelCase__ : Optional[int] = os.path.join(args.input_dir , "tokenizer.model" ) write_tokenizer(args.output_dir , _A ) if __name__ == "__main__": main()
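# A worked check of the intermediate-size formula defined at the top of the
# conversion script (the first `lowercase_` function): take roughly 8n/3, apply
# the optional ffn_dim_multiplier, and round up to the next multiple of
# `multiple_of`. Written here with readable names as a hedged restatement.
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)

# LLaMA-7B uses hidden size 4096; the formula reproduces the 11008 entry in the
# per-model-size table at the top of the script.
assert compute_intermediate_size(4096) == 11008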
5
def lowercase_ ( _A : int , _A : int ):
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be non-negative" )
    lowerCamelCase__ : List[str] = str(bin(_A ) )[2:]  # remove the leading "0b"
    lowerCamelCase__ : List[Any] = str(bin(_A ) )[2:]  # remove the leading "0b"
    lowerCamelCase__ : List[Any] = max(len(_A ) , len(_A ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(_A ) , b_binary.zfill(_A ) )
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
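# Quick sanity check of the XOR helper above (its original name was obfuscated
# to `lowercase_`): 25 = 0b11001 and 32 = 0b100000, so after zero-filling to a
# common width the digit-wise comparison yields 0b111001, i.e. 57. The builtin
# operator agrees:
assert bin(25 ^ 32) == "0b111001"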
5
1
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def lowercase_ ( _A : Dict ): """simple docstring""" return EnvironmentCommand() def lowercase_ ( _A : List[Any] ): """simple docstring""" return EnvironmentCommand(args.accelerate_config_file ) class _lowercase ( lowercase__): """simple docstring""" @staticmethod def lowerCAmelCase ( __lowerCamelCase : ArgumentParser ): '''simple docstring''' lowerCamelCase__ : Any = parser.add_parser("env" ) download_parser.set_defaults(func=__lowerCamelCase ) download_parser.add_argument( "--accelerate-config_file" , default=__lowerCamelCase , help="The accelerate config file to use for the default values in the launching script." , ) download_parser.set_defaults(func=__lowerCamelCase ) def __init__( self : Dict , __lowerCamelCase : int , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : int = accelerate_config_file def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = "not installed" if is_safetensors_available(): import safetensors lowerCamelCase__ : Tuple = safetensors.__version__ elif importlib.util.find_spec("safetensors" ) is not None: import safetensors lowerCamelCase__ : str = f"{safetensors.__version__} but is ignored because of PyTorch version too old." lowerCamelCase__ : Tuple = "not installed" lowerCamelCase__ : List[str] = "not found" if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file lowerCamelCase__ : Union[str, Any] = accelerate.__version__ # Get the default from the config file. 
if self._accelerate_config_file is not None or os.path.isfile(__lowerCamelCase ): lowerCamelCase__ : List[Any] = load_config_from_file(self._accelerate_config_file ).to_dict() lowerCamelCase__ : Any = ( "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()] ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else f"\t{accelerate_config}" ) lowerCamelCase__ : List[Any] = "not installed" lowerCamelCase__ : Optional[int] = "NA" if is_torch_available(): import torch lowerCamelCase__ : Tuple = torch.__version__ lowerCamelCase__ : Optional[Any] = torch.cuda.is_available() lowerCamelCase__ : List[str] = "not installed" lowerCamelCase__ : Dict = "NA" if is_tf_available(): import tensorflow as tf lowerCamelCase__ : List[str] = tf.__version__ try: # deprecated in v2.1 lowerCamelCase__ : Union[str, Any] = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool lowerCamelCase__ : Union[str, Any] = bool(tf.config.list_physical_devices("GPU" ) ) lowerCamelCase__ : int = "not installed" lowerCamelCase__ : List[Any] = "not installed" lowerCamelCase__ : int = "not installed" lowerCamelCase__ : Tuple = "NA" if is_flax_available(): import flax import jax import jaxlib lowerCamelCase__ : List[str] = flax.__version__ lowerCamelCase__ : Tuple = jax.__version__ lowerCamelCase__ : Tuple = jaxlib.__version__ lowerCamelCase__ : Any = jax.lib.xla_bridge.get_backend().platform lowerCamelCase__ : Union[str, Any] = { "`transformers` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Huggingface_hub version": huggingface_hub.__version__, "Safetensors version": f"{safetensors_version}", "Accelerate version": f"{accelerate_version}", "Accelerate config": f"{accelerate_config_str}", "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})", "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})", "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})", "Jax version": f"{jax_version}", "JaxLib version": f"{jaxlib_version}", "Using GPU in script?": "<fill in>", "Using distributed or parallel set-up in script?": "<fill in>", } print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" ) print(self.format_dict(__lowerCamelCase ) ) return info @staticmethod def lowerCAmelCase ( __lowerCamelCase : int ): '''simple docstring''' return "\n".join([f"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
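# The env command above probes every optional dependency with the same pattern:
# default to "not installed", check availability, then import and record the
# version. A generic, hedged sketch of that pattern (`probe_version` is a
# hypothetical helper, not part of the transformers CLI):
import importlib
import importlib.util

def probe_version(module_name: str) -> str:
    if importlib.util.find_spec(module_name) is None:
        return "not installed"
    return getattr(importlib.import_module(module_name), "__version__", "unknown")

print(probe_version("torch"))  # a version string if installed, else "not installed"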
5
import os from pathlib import Path def lowercase_ ( ): """simple docstring""" from torch.utils.cpp_extension import load lowerCamelCase__ : Any = Path(_A ).resolve().parent.parent.parent / "kernels" / "deformable_detr" lowerCamelCase__ : Optional[int] = [ root / filename for filename in [ "vision.cpp", os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ), os.path.join("cuda" , "ms_deform_attn_cuda.cu" ), ] ] load( "MultiScaleDeformableAttention" , _A , with_cuda=_A , extra_include_paths=[str(_A )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[ "-DCUDA_HAS_FP16=1", "-D__CUDA_NO_HALF_OPERATORS__", "-D__CUDA_NO_HALF_CONVERSIONS__", "-D__CUDA_NO_HALF2_OPERATORS__", ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
5
1
from __future__ import annotations

from math import ceil, floor, sqrt


def lowercase_ ( _A : int = 2000000 ):
    """simple docstring"""
    lowerCamelCase__ : list[int] = [0]
    lowerCamelCase__ : int

    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )

    # we want this to be as close as possible to target
    lowerCamelCase__ : int = 0
    # the area corresponding to the grid that gives the product closest to target
    lowerCamelCase__ : int = 0
    # an estimate of b, using the quadratic formula
    lowerCamelCase__ : float
    # the largest integer less than or equal to b_estimate
    lowerCamelCase__ : int
    # the smallest integer greater than or equal to b_estimate
    lowerCamelCase__ : int
    # the triangle number corresponding to b_floor
    lowerCamelCase__ : int
    # the triangle number corresponding to b_ceil
    lowerCamelCase__ : int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        lowerCamelCase__ : int = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        lowerCamelCase__ : Optional[int] = floor(_A )
        lowerCamelCase__ : Tuple = ceil(_A )
        lowerCamelCase__ : str = triangle_numbers[b_floor]
        lowerCamelCase__ : List[Any] = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            lowerCamelCase__ : List[str] = triangle_b_first_guess * triangle_a
            lowerCamelCase__ : Any = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            lowerCamelCase__ : List[Any] = triangle_b_second_guess * triangle_a
            lowerCamelCase__ : Union[str, Any] = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f'{solution() = }')
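# The search above rests on a counting identity: an a-by-b grid of unit squares
# contains T(a) * T(b) rectangles, where T(n) = n(n+1)/2 is the n-th triangle
# number, and the quadratic formula inverts T(b) ~ target / T(a) to estimate b.
# A tiny check against the classic example of a 3x2 grid, which holds 18 rectangles:
def triangle(n: int) -> int:
    return n * (n + 1) // 2

assert triangle(3) * triangle(2) == 18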
5
import os from datetime import datetime as dt from github import Github A : Union[str, Any] = [ "good first issue", "good second issue", "good difficult issue", "enhancement", "new pipeline/model", "new scheduler", "wip", ] def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : Optional[int] = Github(os.environ["GITHUB_TOKEN"] ) lowerCamelCase__ : str = g.get_repo("huggingface/diffusers" ) lowerCamelCase__ : Optional[int] = repo.get_issues(state="open" ) for issue in open_issues: lowerCamelCase__ : str = sorted(issue.get_comments() , key=lambda _A : i.created_at , reverse=_A ) lowerCamelCase__ : str = comments[0] if len(_A ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state="closed" ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state="open" ) issue.remove_from_labels("stale" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( "This issue has been automatically marked as stale because it has not had " "recent activity. If you think this still needs to be addressed " "please comment on this thread.\n\nPlease note that issues that do not follow the " "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) " "are likely to be ignored." ) issue.add_to_labels("stale" ) if __name__ == "__main__": main()
5
1
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin A : List[str] = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right A : int = 250004 A : Optional[int] = 250020 @require_sentencepiece @require_tokenizers class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = MBartaaTokenizer A__ = MBartaaTokenizerFast A__ = True A__ = True def lowerCAmelCase ( self : str ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase__ : Tuple = MBartaaTokenizer(__lowerCamelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Optional[int] = "<s>" lowerCamelCase__ : Any = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(__lowerCamelCase ) , 1054 ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1054 ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Dict = MBartaaTokenizer(__lowerCamelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__lowerCamelCase ) lowerCamelCase__ : str = tokenizer.tokenize("This is a test" ) self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCamelCase__ : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( __lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) lowerCamelCase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowerCamelCase__ : Tuple = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : str = {"input_ids": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( 
expected_encoding=__lowerCamelCase , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCamelCase__ : Dict = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCamelCase__ : int = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : List[str] = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : List[Any] = tempfile.mkdtemp() lowerCamelCase__ : List[str] = tokenizer_r.save_pretrained(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = tokenizer_p.save_pretrained(__lowerCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) lowerCamelCase__ : List[Any] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase ) # Checks everything loads correctly in the same way lowerCamelCase__ : List[str] = tokenizer_r.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : Any = tokenizer_p.from_pretrained(__lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__lowerCamelCase ) # Save tokenizer rust, legacy_format=True lowerCamelCase__ : List[str] = tempfile.mkdtemp() lowerCamelCase__ : Tuple = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase ) lowerCamelCase__ : str = tokenizer_p.save_pretrained(__lowerCamelCase ) # Checks it save with the same files self.assertSequenceEqual(__lowerCamelCase , __lowerCamelCase ) # Checks everything loads correctly in the same way lowerCamelCase__ : Union[str, Any] = tokenizer_r.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : str = tokenizer_p.from_pretrained(__lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) ) shutil.rmtree(__lowerCamelCase ) # Save tokenizer rust, legacy_format=False lowerCamelCase__ : Optional[Any] = tempfile.mkdtemp() lowerCamelCase__ : Union[str, Any] = tokenizer_r.save_pretrained(__lowerCamelCase , legacy_format=__lowerCamelCase ) lowerCamelCase__ : str = tokenizer_p.save_pretrained(__lowerCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCamelCase__ : Optional[int] = tokenizer_r.from_pretrained(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = tokenizer_p.from_pretrained(__lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCamelCase , __lowerCamelCase ) ) shutil.rmtree(__lowerCamelCase ) @require_torch @require_sentencepiece @require_tokenizers class 
_lowercase ( unittest.TestCase): """simple docstring""" A__ = "facebook/mbart-large-50-one-to-many-mmt" A__ = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] A__ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] A__ = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2] @classmethod def lowerCAmelCase ( cls : Optional[int] ): '''simple docstring''' lowerCamelCase__ : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" ) lowerCamelCase__ : Optional[int] = 1 return cls def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 250038 ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __lowerCamelCase ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids ) lowerCamelCase__ : List[Any] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] lowerCamelCase__ : List[str] = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) lowerCamelCase__ : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , __lowerCamelCase ) lowerCamelCase__ : Tuple = 10 lowerCamelCase__ : List[str] = self.tokenizer(__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase ).input_ids[0] self.assertEqual(ids[0] , __lowerCamelCase ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) def lowerCAmelCase ( self : int ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250053, 250001] ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : int = tempfile.mkdtemp() lowerCamelCase__ : List[str] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = MBartaaTokenizer.from_pretrained(__lowerCamelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCamelCase ) @require_torch def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , 
padding=__lowerCamelCase , return_tensors="pt" ) lowerCamelCase__ : List[Any] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : List[str] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) lowerCamelCase__ : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) lowerCamelCase__ : str = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , __lowerCamelCase ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : int = self.tokenizer(self.src_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=3 , return_tensors="pt" ) lowerCamelCase__ : str = self.tokenizer( text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=10 , return_tensors="pt" ) lowerCamelCase__ : Any = targets["input_ids"] lowerCamelCase__ : Union[str, Any] = shift_tokens_right(__lowerCamelCase , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Dict = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" ) self.assertEqual( nested_simplify(__lowerCamelCase ) , { # en_XX, A, test, EOS "input_ids": [[250004, 62, 3034, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 250001, } , )
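# The batch tests above hinge on `shift_tokens_right`: mBART builds decoder
# inputs by rotating the last non-pad token of the labels (here the EOS) to the
# front. A hedged, list-based sketch of that behaviour — the real helper works
# on tensors, but the assertion mirrors the one in the tests:
def shift_tokens_right_sketch(labels, pad_token_id):
    last = max(i for i, tok in enumerate(labels) if tok != pad_token_id)
    return [labels[last]] + labels[:-1]

EOS, RO_CODE, PAD = 2, 250020, 1
labels = [RO_CODE, 884, 9019, EOS, PAD]
shifted = shift_tokens_right_sketch(labels, PAD)
assert shifted[:2] == [EOS, RO_CODE]  # matches batch.decoder_input_ids[1][:2]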
5
from __future__ import annotations


def lowercase_ (
    _A : str ,
    _A : list[str] | None = None ,
    _A : dict[str, float] | None = None ,
    _A : bool = False ,
):
    """simple docstring"""
    lowerCamelCase__ : Tuple = cipher_alphabet or [chr(_A ) for i in range(97 , 123 )]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the English language (how much they show up)
        lowerCamelCase__ : Dict = {
            "a": 0.08_497,
            "b": 0.01_492,
            "c": 0.02_202,
            "d": 0.04_253,
            "e": 0.11_162,
            "f": 0.02_228,
            "g": 0.02_015,
            "h": 0.06_094,
            "i": 0.07_546,
            "j": 0.00_153,
            "k": 0.01_292,
            "l": 0.04_025,
            "m": 0.02_406,
            "n": 0.06_749,
            "o": 0.07_507,
            "p": 0.01_929,
            "q": 0.00_095,
            "r": 0.07_587,
            "s": 0.06_327,
            "t": 0.09_356,
            "u": 0.02_758,
            "v": 0.00_978,
            "w": 0.02_560,
            "x": 0.00_150,
            "y": 0.01_994,
            "z": 0.00_077,
        }
    else:
        # Custom frequencies dictionary
        lowerCamelCase__ : Optional[int] = frequencies_dict

    if not case_sensitive:
        lowerCamelCase__ : str = ciphertext.lower()

    # Chi squared statistic values
    lowerCamelCase__ : dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(_A ) ):
        lowerCamelCase__ : Optional[Any] = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                lowerCamelCase__ : Dict = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    _A )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        lowerCamelCase__ : str = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                lowerCamelCase__ : List[str] = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    lowerCamelCase__ : List[str] = decrypted_with_shift.lower().count(_A )

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    lowerCamelCase__ : List[Any] = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    lowerCamelCase__ : str = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    lowerCamelCase__ : Any = decrypted_with_shift.count(_A )

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    lowerCamelCase__ : str = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    lowerCamelCase__ : int = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        lowerCamelCase__ : Optional[int] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(_A : int ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    lowerCamelCase__ : int = min(
        _A ,
        key=_A ,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        (
            lowerCamelCase__
        ) ,
        (
            lowerCamelCase__
        ) ,
    ) : int = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
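# Each candidate shift above is scored with Pearson's chi-squared statistic,
# chi2 = sum((observed - expected)**2 / expected), where the expected count of a
# letter is its English frequency times the message length. A tiny illustration
# with made-up counts: the implausibly frequent 'z' dominates the score.
observed = {"e": 12, "t": 9, "z": 5}
frequencies = {"e": 0.11_162, "t": 0.09_356, "z": 0.00_077}
total_letters = 100  # pretend the decrypted text contains 100 letters

chi_squared = sum(
    (count - frequencies[letter] * total_letters) ** 2
    / (frequencies[letter] * total_letters)
    for letter, count in observed.items()
)
print(chi_squared)  # ~315, almost all of it contributed by 'z'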
5
1
from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def lowercase_ ( _A : NDArray[floataa] , _A : NDArray[floataa] , _A : list[int] , _A : int , ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ : Optional[int] = coefficient_matrix.shape lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = constant_matrix.shape if rowsa != colsa: lowerCamelCase__ : Dict = F"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}" raise ValueError(_A ) if colsa != 1: lowerCamelCase__ : Optional[Any] = F"Constant matrix must be nx1 but received {rowsa}x{colsa}" raise ValueError(_A ) if rowsa != rowsa: lowerCamelCase__ : Union[str, Any] = ( "Coefficient and constant matrices dimensions must be nxn and nx1 but " F"received {rowsa}x{colsa} and {rowsa}x{colsa}" ) raise ValueError(_A ) if len(_A ) != rowsa: lowerCamelCase__ : Union[str, Any] = ( "Number of initial values must be equal to number of rows in coefficient " F"matrix but received {len(_A )} and {rowsa}" ) raise ValueError(_A ) if iterations <= 0: raise ValueError("Iterations must be at least 1" ) lowerCamelCase__ : NDArray[floataa] = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = table.shape strictly_diagonally_dominant(_A ) # Iterates the whole matrix for given number of times for _ in range(_A ): lowerCamelCase__ : Tuple = [] for row in range(_A ): lowerCamelCase__ : List[str] = 0 for col in range(_A ): if col == row: lowerCamelCase__ : Union[str, Any] = table[row][col] elif col == cols - 1: lowerCamelCase__ : Tuple = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] lowerCamelCase__ : str = (temp + val) / denom new_val.append(_A ) lowerCamelCase__ : Optional[Any] = new_val return [float(_A ) for i in new_val] def lowercase_ ( _A : NDArray[floataa] ): """simple docstring""" lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = table.shape lowerCamelCase__ : List[str] = True for i in range(0 , _A ): lowerCamelCase__ : Dict = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError("Coefficient matrix is not strictly diagonally dominant" ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
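# One hedged NumPy sketch of the update the solver above performs each sweep:
# x_new[i] = (b[i] - sum_{j != i} A[i][j] * x[j]) / A[i][i]. A is chosen strictly
# diagonally dominant, which is exactly the property the helper validates, so
# the iteration converges.
import numpy as np

A = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
b = np.array([2.0, -6.0, -4.0])
x = np.zeros(3)
for _ in range(100):
    x = (b - (A @ x - np.diag(A) * x)) / np.diag(A)  # subtract off-diagonal part

assert np.allclose(A @ x, b, atol=1e-8)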
5
def lowercase_ ( _A : int ): """simple docstring""" if not isinstance(_A , _A ): lowerCamelCase__ : List[str] = F"Input value of [number={number}] must be an integer" raise TypeError(_A ) if number < 0: return False lowerCamelCase__ : Dict = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
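# The helper above tests whether the decimal digits of n reappear as the
# trailing digits of n * n — i.e. whether n is an automorphic number. A hedged
# restatement with readable names, checked against the automorphic numbers < 100:
def is_automorphic(number: int) -> bool:
    if number < 0:
        return False
    square = number * number
    while number > 0:
        if number % 10 != square % 10:
            return False
        number //= 10
        square //= 10
    return True

assert [n for n in range(1, 100) if is_automorphic(n)] == [1, 5, 6, 25, 76]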
5
1
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowercase : """simple docstring""" def __init__( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : int=13 , __lowerCamelCase : Optional[Any]=32 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Dict=[10, 20, 30, 40] , __lowerCamelCase : List[str]=[2, 2, 3, 2] , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : str=37 , __lowerCamelCase : str="gelu" , __lowerCamelCase : Dict=10 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : List[str]=["stage2", "stage3", "stage4"] , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : Any=None , ): '''simple docstring''' lowerCamelCase__ : Tuple = parent lowerCamelCase__ : Optional[Any] = batch_size lowerCamelCase__ : str = image_size lowerCamelCase__ : str = num_channels lowerCamelCase__ : Optional[Any] = num_stages lowerCamelCase__ : Optional[int] = hidden_sizes lowerCamelCase__ : Union[str, Any] = depths lowerCamelCase__ : Any = is_training lowerCamelCase__ : List[Any] = use_labels lowerCamelCase__ : Dict = intermediate_size lowerCamelCase__ : int = hidden_act lowerCamelCase__ : Tuple = type_sequence_label_size lowerCamelCase__ : Tuple = initializer_range lowerCamelCase__ : Tuple = out_features lowerCamelCase__ : str = num_labels lowerCamelCase__ : List[Any] = scope lowerCamelCase__ : int = num_stages def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : List[str] = None if self.use_labels: lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : str = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self : int ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def lowerCAmelCase ( self : str ): '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__lowerCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=__lowerCamelCase , loss_ignore_index=255 , num_labels=self.num_labels , ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int ): '''simple docstring''' lowerCamelCase__ : Any = 
UperNetForSemanticSegmentation(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : int = model(__lowerCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : List[str] = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Optional[int] = config_and_inputs lowerCamelCase__ : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = (UperNetForSemanticSegmentation,) if is_torch_available() else () A__ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} A__ = False A__ = False A__ = False A__ = False A__ = False A__ = False def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : int = UperNetModelTester(self ) lowerCamelCase__ : Any = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : List[Any] = model_class(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : Any = [*signature.parameters.keys()] lowerCamelCase__ : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCamelCase ) @unittest.skip(reason="UperNet does not use inputs_embeds" ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' pass @unittest.skip(reason="UperNet does not support input and output embeddings" ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason="UperNet does not have a base model" ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip(reason="UperNet does not have a base model" ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' pass def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' def check_hidden_states_output(__lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ): lowerCamelCase__ : Dict = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): lowerCamelCase__ : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) lowerCamelCase__ : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase__ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : List[Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__ : List[Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ : Optional[int] = _config_zero_init(__lowerCamelCase ) lowerCamelCase__ : Any = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: lowerCamelCase__ : Tuple = model_class(config=__lowerCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , ) @unittest.skip(reason="UperNet does not have tied weights" ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' pass @slow def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : List[Any] = UperNetForSemanticSegmentation.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : Optional[int] = hf_hub_download( repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" ) lowerCamelCase__ : List[str] = Image.open(_A ).convert("RGB" ) return image @require_torch @require_vision @slow class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" ) lowerCamelCase__ : int = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(__lowerCamelCase ) lowerCamelCase__ : str = prepare_img() lowerCamelCase__ : Any = processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) with torch.no_grad(): lowerCamelCase__ : str = model(**__lowerCamelCase ) lowerCamelCase__ : Any = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) lowerCamelCase__ : Optional[int] = 
torch.tensor( [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __lowerCamelCase , atol=1E-4 ) ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" ) lowerCamelCase__ : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = prepare_img() lowerCamelCase__ : str = processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase ) with torch.no_grad(): lowerCamelCase__ : Union[str, Any] = model(**__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = torch.tensor( [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
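For reference, a minimal inference sketch outside the test harness, mirroring the integration test above (the checkpoint name and the (1, num_labels, 512, 512) logits shape come from the test; reducing logits to a class map with argmax is an assumption about downstream use):

import torch
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

image = Image.open("ADE_val_00000001.jpg").convert("RGB")  # any RGB image works
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, num_labels, 512, 512)
segmentation = logits.argmax(dim=1)[0]  # per-pixel predicted class ids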
5
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
5
1
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def lowercase_ ( _A : str ): """simple docstring""" lowerCamelCase__ : Optional[int] = 384 if "tiny" in model_name: lowerCamelCase__ : str = [3, 3, 9, 3] lowerCamelCase__ : Optional[int] = [96, 192, 384, 768] if "small" in model_name: lowerCamelCase__ : Union[str, Any] = [3, 3, 27, 3] lowerCamelCase__ : Any = [96, 192, 384, 768] if "base" in model_name: lowerCamelCase__ : List[Any] = [3, 3, 27, 3] lowerCamelCase__ : Optional[int] = [128, 256, 512, 1024] lowerCamelCase__ : Optional[int] = 512 if "large" in model_name: lowerCamelCase__ : Optional[int] = [3, 3, 27, 3] lowerCamelCase__ : Optional[Any] = [192, 384, 768, 1536] lowerCamelCase__ : Optional[Any] = 768 if "xlarge" in model_name: lowerCamelCase__ : List[Any] = [3, 3, 27, 3] lowerCamelCase__ : Tuple = [256, 512, 1024, 2048] lowerCamelCase__ : int = 1024 # set label information lowerCamelCase__ : Optional[Any] = 150 lowerCamelCase__ : int = "huggingface/label-files" lowerCamelCase__ : Dict = "ade20k-id2label.json" lowerCamelCase__ : Optional[int] = json.load(open(hf_hub_download(_A , _A , repo_type="dataset" ) , "r" ) ) lowerCamelCase__ : List[str] = {int(_A ): v for k, v in idalabel.items()} lowerCamelCase__ : Optional[int] = {v: k for k, v in idalabel.items()} lowerCamelCase__ : Dict = ConvNextConfig( depths=_A , hidden_sizes=_A , out_features=["stage1", "stage2", "stage3", "stage4"] ) lowerCamelCase__ : Optional[Any] = UperNetConfig( backbone_config=_A , auxiliary_in_channels=_A , num_labels=_A , idalabel=_A , labelaid=_A , ) return config def lowercase_ ( _A : Any ): """simple docstring""" lowerCamelCase__ : int = [] # fmt: off # stem rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") ) rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") ) rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") ) rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") ) rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") ) rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") ) rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") ) rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") ) rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") ) rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") ) rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") ) rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") ) if i > 0: 
rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight") ) rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias") ) rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight") ) rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias") ) rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") ) rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") ) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def lowercase_ ( _A : Optional[int] , _A : List[str] , _A : int ): """simple docstring""" lowerCamelCase__ : Optional[Any] = dct.pop(_A ) lowerCamelCase__ : Dict = val def lowercase_ ( _A : Tuple , _A : Optional[int] , _A : int ): """simple docstring""" lowerCamelCase__ : Union[str, Any] = { "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth", "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth", "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth", "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth", "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth", } lowerCamelCase__ : Union[str, Any] = model_name_to_url[model_name] lowerCamelCase__ : Tuple = torch.hub.load_state_dict_from_url(_A , map_location="cpu" )["state_dict"] lowerCamelCase__ : Dict = get_upernet_config(_A ) lowerCamelCase__ : Dict = UperNetForSemanticSegmentation(_A ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): lowerCamelCase__ : Optional[int] = state_dict.pop(_A ) if "bn" in key: lowerCamelCase__ : Optional[int] = key.replace("bn" , "batch_norm" ) lowerCamelCase__ : List[str] = val # rename keys lowerCamelCase__ : int = create_rename_keys(_A ) for src, dest in rename_keys: rename_key(_A , _A , _A ) model.load_state_dict(_A ) # verify on image lowerCamelCase__ : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" lowerCamelCase__ : List[Any] = Image.open(requests.get(_A , stream=_A ).raw ).convert("RGB" ) lowerCamelCase__ : Tuple = SegformerImageProcessor() lowerCamelCase__ : List[Any] = processor(_A , return_tensors="pt" ).pixel_values with torch.no_grad(): lowerCamelCase__ : Optional[int] = model(_A ) if model_name == 
"upernet-convnext-tiny": lowerCamelCase__ : Optional[int] = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ) elif model_name == "upernet-convnext-small": lowerCamelCase__ : Tuple = torch.tensor( [[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] ) elif model_name == "upernet-convnext-base": lowerCamelCase__ : str = torch.tensor( [[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] ) elif model_name == "upernet-convnext-large": lowerCamelCase__ : Union[str, Any] = torch.tensor( [[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] ) elif model_name == "upernet-convnext-xlarge": lowerCamelCase__ : Tuple = torch.tensor( [[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] ) print("Logits:" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_A ) print(F"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(_A ) if push_to_hub: print(F"Pushing model and processor for {model_name} to hub" ) model.push_to_hub(F"openmmlab/{model_name}" ) processor.push_to_hub(F"openmmlab/{model_name}" ) if __name__ == "__main__": A : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="upernet-convnext-tiny", type=str, choices=[f'upernet-convnext-{size}' for size in ["tiny", "small", "base", "large", "xlarge"]], help="Name of the ConvNext UperNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) A : Union[str, Any] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
5
from __future__ import annotations import time import numpy as np A : Dict = [8, 5, 9, 7] A : Optional[Any] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] A : Any = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class _lowercase : """simple docstring""" def __init__( self : str , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ): '''simple docstring''' lowerCamelCase__ : int = claim_vector lowerCamelCase__ : str = allocated_resources_table lowerCamelCase__ : int = maximum_claim_table def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return {self.__need().index(__lowerCamelCase ): i for i in self.__need()} def lowerCAmelCase ( self : List[str] , **__lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.__need() lowerCamelCase__ : str = self.__allocated_resources_table lowerCamelCase__ : List[Any] = self.__available_resources() lowerCamelCase__ : str = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n" ) while need_list: lowerCamelCase__ : int = False for each_need in need_list: lowerCamelCase__ : Dict = True for index, need in enumerate(__lowerCamelCase ): if need > available_resources[index]: lowerCamelCase__ : str = False break if execution: lowerCamelCase__ : Tuple = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: lowerCamelCase__ : Any = original_need_index print(f"Process {process_number + 1} is executing." ) # remove the process run from stack need_list.remove(__lowerCamelCase ) # update available/freed resources stack lowerCamelCase__ : Union[str, Any] = np.array(__lowerCamelCase ) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(__lowerCamelCase ) for x in available_resources] ) ) break if safe: print("The process is in a safe state.\n" ) else: print("System in unsafe state. Aborting...\n" ) break def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' print(" " * 9 + "Allocated Resource Table" ) for item in self.__allocated_resources_table: print( f"P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print(" " * 9 + "System Resource Table" ) for item in self.__maximum_claim_table: print( f"P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}" + " ".join(f"{it:>8}" for it in item ) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) ) print( "Initial Available Resources: " + " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
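The main loop above repeatedly looks for a process whose remaining need fits in the currently available resources, runs it, and reclaims its allocation. A compact standalone restatement of that safety check with descriptive names (an illustrative sketch; is_safe_state and its argument names are not part of the class above):

import numpy as np


def is_safe_state(claim_vector, allocated, maximum_claim):
    """Return True if some execution order lets every process run to completion."""
    available = np.array(claim_vector) - np.array(allocated).sum(axis=0)
    need = [np.array(m) - np.array(a) for m, a in zip(maximum_claim, allocated)]
    pending = list(range(len(allocated)))
    while pending:
        for p in list(pending):
            if all(need[p] <= available):  # process p can finish with what is free now
                available = available + np.array(allocated[p])  # it releases its resources
                pending.remove(p)
                break
        else:
            return False  # no process can proceed: unsafe state
    return True


# With the test data defined at the top of the file, the system is in a safe state:
claim = [8, 5, 9, 7]
alloc = [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]]
limit = [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]]
assert is_safe_state(claim, alloc, limit)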
5
1
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
5
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = BarthezTokenizer A__ = BarthezTokenizerFast A__ = True A__ = True def lowerCAmelCase ( self : int ): '''simple docstring''' super().setUp() lowerCamelCase__ : List[str] = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=__lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Any = "<pad>" lowerCamelCase__ : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(__lowerCamelCase ) , 101122 ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowerCamelCase__ : str = [0, 57, 3018, 70307, 91, 2] lowerCamelCase__ : Tuple = self.tokenizer( __lowerCamelCase , max_length=len(__lowerCamelCase ) , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) lowerCamelCase__ : Any = batch.input_ids.tolist()[0] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCamelCase__ : Any = self.get_tokenizer() lowerCamelCase__ : Tuple = self.get_rust_tokenizer() lowerCamelCase__ : Union[str, Any] = "I was born in 92000, and this is falsé." 
lowerCamelCase__ : Dict = tokenizer.tokenize(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = rust_tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Tuple = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : List[str] = self.get_rust_tokenizer() lowerCamelCase__ : Optional[Any] = tokenizer.encode(__lowerCamelCase ) lowerCamelCase__ : List[Any] = rust_tokenizer.encode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : int = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. lowerCamelCase__ : List[str] = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=__lowerCamelCase , )
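A short usage sketch for the tokenizer exercised above (the checkpoint name and sample sentences come from the tests themselves):

from transformers import BarthezTokenizerFast

tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
batch = tokenizer(
    ["A long paragraph for summarization.", "Another paragraph for summarization."],
    padding=True,
    return_tensors="pt",
)
print(batch.input_ids.shape)  # the test above pads/truncates these two sentences to (2, 6)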
5
1
import math
import unittest


def is_prime(number: int) -> bool:
    """simple docstring"""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class _lowercase ( unittest.TestCase):
    """simple docstring"""

    def test_primes(self):
        '''simple docstring'''
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        '''simple docstring'''
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
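A worked check of the 6k +/- 1 trial division above, with divisors small enough to verify by hand:

# 97: the loop only tests i = 5 (range(5, int(sqrt(97)) + 1, 6)); 97 % 5 != 0 and 97 % 7 != 0, so prime.
assert is_prime(97)
# 91 = 7 * 13 is caught at i = 5 through the (i + 2) branch, since 91 % 7 == 0.
assert not is_prime(91)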
5
import cv2
import numpy as np


class HarrisCorner:
    """simple docstring"""

    def __init__(self, k: float, window_size: int):
        '''simple docstring'''
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        '''simple docstring'''
        return str(self.k)

    def detect(self, img_path: str):
        '''simple docstring'''
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
5
1
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


A = logging.get_logger(__name__)


class _lowercase ( DonutImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
5
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _lowercase : """simple docstring""" def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=13 , __lowerCamelCase : Dict=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Optional[Any]=36 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Union[str, Any]=6 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Dict=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[Any]=512 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Dict=None , ): '''simple docstring''' lowerCamelCase__ : Dict = parent lowerCamelCase__ : List[Any] = batch_size lowerCamelCase__ : Any = seq_length lowerCamelCase__ : List[str] = is_training lowerCamelCase__ : int = use_input_mask lowerCamelCase__ : List[str] = use_token_type_ids lowerCamelCase__ : int = use_labels lowerCamelCase__ : Dict = vocab_size lowerCamelCase__ : List[Any] = embedding_size lowerCamelCase__ : Dict = hidden_size lowerCamelCase__ : Any = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_hidden_groups lowerCamelCase__ : Optional[int] = num_attention_heads lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob lowerCamelCase__ : Optional[int] = max_position_embeddings lowerCamelCase__ : List[Any] = type_vocab_size lowerCamelCase__ : Optional[Any] = type_sequence_label_size lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : str = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Any = scope def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[int] = None if self.use_input_mask: lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Tuple = None lowerCamelCase__ : List[str] = None lowerCamelCase__ : int = None if self.use_labels: lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , 
self.num_labels ) lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : str ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : int = AlbertModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Any = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Any = AlbertForPreTraining(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Union[str, Any] = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , sentence_order_label=__lowerCamelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Dict = AlbertForMaskedLM(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : int ): '''simple docstring''' lowerCamelCase__ : str = AlbertForQuestionAnswering(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : str = model( __lowerCamelCase , 
attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ): '''simple docstring''' lowerCamelCase__ : int = self.num_labels lowerCamelCase__ : Optional[int] = AlbertForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.num_labels lowerCamelCase__ : List[str] = AlbertForTokenClassification(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.num_choices lowerCamelCase__ : Optional[int] = AlbertForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : int = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : int = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Union[str, Any] = config_and_inputs lowerCamelCase__ : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) A__ = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, 
"question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) A__ = True def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : Any = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class in get_values(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase ) lowerCamelCase__ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = AlbertModelTester(self ) lowerCamelCase__ : Optional[Any] = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase__ : Dict = type self.model_tester.create_and_check_model(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : List[str] = AlbertModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = AlbertModel.from_pretrained("albert-base-v2" ) lowerCamelCase__ : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowerCamelCase__ : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCamelCase__ : List[Any] = model(__lowerCamelCase , 
attention_mask=__lowerCamelCase )[0] lowerCamelCase__ : Tuple = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) lowerCamelCase__ : Dict = torch.tensor( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1E-4 ) )
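A minimal sketch of the same checkpoint outside the test harness (the test above feeds raw token ids; going through AlbertTokenizer instead is an assumption, and it requires sentencepiece):

import torch
from transformers import AlbertModel, AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
model = AlbertModel.from_pretrained("albert-base-v2")
inputs = tokenizer("Hello, ALBERT!", return_tensors="pt")
with torch.no_grad():
    last_hidden_state = model(**inputs).last_hidden_state  # (1, seq_len, 768)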
5
1
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """simple docstring"""
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    """simple docstring"""
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    """simple docstring"""
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    """simple docstring"""
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
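For a tree such as maths/prime_check.py plus maths/series/p_series.py, the script above prints markdown along these lines (illustrative output):

## Maths
  * [Prime Check](maths/prime_check.py)
  * Series
    * [P Series](maths/series/p_series.py)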
5
import os


def solution(filename: str = "input.txt") -> int:
    """simple docstring"""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
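The three inner loops are one left-to-right column sweep with downward and upward relaxations, matching the Project Euler 82 movement rules (right, up, down). A tiny in-memory version of the same DP with a hand-checkable case (minimal_path_sum is an illustrative helper, not part of the file):

def minimal_path_sum(matrix: list[list[int]]) -> int:
    rows, cols = len(matrix), len(matrix[0])
    sums = [[matrix[i][0]] + [-1] * (cols - 1) for i in range(rows)]
    for j in range(1, cols):
        for i in range(rows):  # step right from the previous column
            sums[i][j] = sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):  # relax paths that move down
            sums[i][j] = min(sums[i][j], sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # relax paths that move up
            sums[i][j] = min(sums[i][j], sums[i + 1][j] + matrix[i][j])
    return min(row[-1] for row in sums)


assert minimal_path_sum([[1, 9], [5, 1]]) == 6  # enter at the 5, step right onto the 1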
5
1
import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features A : Union[str, Any] = logging.get_logger(__name__) A : Union[str, Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) A : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _lowercase : """simple docstring""" A__ = field( default=lowercase__ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase__)}) A__ = field( default=lowercase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}) A__ = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ = field( default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) A__ = field( default=64 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) A__ = field( default=30 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) } , ) A__ = field( default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"}) A__ = field( default=lowercase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}) A__ = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"}) class _lowercase ( lowercase__): """simple docstring""" A__ = "train" A__ = "dev" class _lowercase ( lowercase__): """simple docstring""" A__ = 42 A__ = 42 A__ = 42 A__ = 42 def __init__( self : Optional[int] , __lowerCamelCase : SquadDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "pt" , ): '''simple docstring''' lowerCamelCase__ : List[str] = args lowerCamelCase__ : Tuple = is_language_sensitive lowerCamelCase__ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__lowerCamelCase , __lowerCamelCase ): try: lowerCamelCase__ : List[str] = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) lowerCamelCase__ : str = mode # Load data features from cache or dataset file lowerCamelCase__ : Any = "v2" if args.version_2_with_negative else "v1" lowerCamelCase__ : List[str] = os.path.join( cache_dir if 
cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCamelCase__ : List[str] = cached_features_file + ".lock" with FileLock(__lowerCamelCase ): if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache: lowerCamelCase__ : str = time.time() lowerCamelCase__ : Tuple = torch.load(__lowerCamelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowerCamelCase__ : Optional[Any] = self.old_features["features"] lowerCamelCase__ : Optional[int] = self.old_features.get("dataset" , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = self.old_features.get("examples" , __lowerCamelCase ) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in" " future run" ) else: if mode == Split.dev: lowerCamelCase__ : List[Any] = self.processor.get_dev_examples(args.data_dir ) else: lowerCamelCase__ : str = self.processor.get_train_examples(args.data_dir ) lowerCamelCase__ , lowerCamelCase__ : Tuple = squad_convert_examples_to_features( examples=self.examples , tokenizer=__lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCamelCase , ) lowerCamelCase__ : int = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__( self : List[Any] ): '''simple docstring''' return len(self.features ) def __getitem__( self : List[str] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = self.features[i] lowerCamelCase__ : Tuple = torch.tensor(feature.input_ids , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long ) lowerCamelCase__ : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.cls_index , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.p_mask , dtype=torch.float ) lowerCamelCase__ : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float ) lowerCamelCase__ : List[str] = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask} ) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible} ) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowerCamelCase__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"start_positions": start_positions, "end_positions": end_positions} ) return inputs
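A construction sketch for the dataset above, using the public names from the original transformers module (Split and SquadDataTrainingArguments appear in the snippet's annotations; SquadDataset is the assumed name of the Dataset subclass, whose identifier is obfuscated here):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")  # dir containing the SQuAD json files
train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
example = train_dataset[0]  # dict of input_ids / attention_mask / token_type_ids tensors (+ start/end positions)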
5
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py A : Tuple = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n" A : Optional[int] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n" A : str = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _lowercase ( datasets.Metric): """simple docstring""" def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[ "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Dict=False ): '''simple docstring''' lowerCamelCase__ : str = compute_bleu( reference_corpus=__lowerCamelCase , translation_corpus=__lowerCamelCase , max_order=__lowerCamelCase , smooth=__lowerCamelCase ) ((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : List[str] = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
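# The metric above delegates the actual computation to compute_bleu from the
# TensorFlow NMT scripts. As a rough illustration of what that function does, here
# is a minimal, hedged sketch of corpus-level BLEU (clipped n-gram precisions plus
# brevity penalty); count_ngrams and sketch_bleu are hypothetical helper names.
import math
from collections import Counter


def count_ngrams(tokens, max_order):
    # Collect n-gram counts for every order up to max_order.
    counts = Counter()
    for order in range(1, max_order + 1):
        for i in range(len(tokens) - order + 1):
            counts[tuple(tokens[i : i + order])] += 1
    return counts


def sketch_bleu(predictions, references, max_order=4):
    matches = Counter()   # clipped n-gram matches per order
    possible = Counter()  # candidate n-grams per order
    translation_length = 0
    reference_length = 0
    for pred, refs in zip(predictions, references):
        translation_length += len(pred)
        reference_length += min(len(ref) for ref in refs)
        # Clip each candidate n-gram count by its maximum count over the references.
        max_ref_counts = Counter()
        for ref in refs:
            for ngram, count in count_ngrams(ref, max_order).items():
                max_ref_counts[ngram] = max(max_ref_counts[ngram], count)
        for ngram, count in count_ngrams(pred, max_order).items():
            possible[len(ngram)] += count
            matches[len(ngram)] += min(count, max_ref_counts[ngram])
    precisions = [
        matches[n] / possible[n] if possible[n] > 0 else 0.0 for n in range(1, max_order + 1)
    ]
    geo_mean = (
        math.exp(sum(math.log(p) for p in precisions) / max_order) if min(precisions) > 0 else 0.0
    )
    ratio = translation_length / reference_length
    brevity_penalty = 1.0 if ratio > 1.0 else math.exp(1 - 1 / ratio)
    return geo_mean * brevity_penalty


predictions = [["hello", "there", "general", "kenobi"]]
references = [[["hello", "there", "general", "kenobi"]]]
print(sketch_bleu(predictions, references))  # 1.0, matching the docstring example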
5
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A : List[str] = { "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"], "tokenization_xlm": ["XLMTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Tuple = [ "XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMForMultipleChoice", "XLMForQuestionAnswering", "XLMForQuestionAnsweringSimple", "XLMForSequenceClassification", "XLMForTokenClassification", "XLMModel", "XLMPreTrainedModel", "XLMWithLMHeadModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[Any] = [ "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMForMultipleChoice", "TFXLMForQuestionAnsweringSimple", "TFXLMForSequenceClassification", "TFXLMForTokenClassification", "TFXLMMainLayer", "TFXLMModel", "TFXLMPreTrainedModel", "TFXLMWithLMHeadModel", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys A : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
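# The module above uses the transformers lazy-import pattern: heavy submodules are
# only imported when an attribute is first accessed, keeping the top-level import
# cheap. A minimal, hedged sketch of the same idea via module-level __getattr__
# (PEP 562); the mapping below is illustrative, not the actual _LazyModule internals.
import importlib

_lazy_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_attr_to_module = {
    attr: mod for mod, attrs in _lazy_import_structure.items() for attr in attrs
}


def __getattr__(name):
    # Triggered only when `name` is not already a module global (PEP 562).
    if name in _attr_to_module:
        value = getattr(importlib.import_module(_attr_to_module[name]), name)
        globals()[name] = value  # cache so subsequent lookups bypass __getattr__
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")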
5
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
5
1
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time recursive solution to the rod-cutting problem."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) solution, O(n^2) time."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    """Helper for top_down_cut_rod; max_rev caches already-solved subproblems."""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) solution, O(n^2) time."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    """Validate that n is non-negative and that every piece length has a price."""
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
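# Quick sanity sketch for the rod-cutting implementations above: the two dynamic-
# programming variants must agree for every length, and the classic CLRS price table
# (assumed example values) gives a maximum revenue of 22 for a rod of length 8.
prices = [1, 5, 8, 9, 10, 17, 17, 20]
for n in range(1, len(prices) + 1):
    assert top_down_cut_rod(n, prices) == bottom_up_cut_rod(n, prices)
print(bottom_up_cut_rod(8, prices))  # 22 (cut into pieces of length 2 and 6)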
5
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : Dict = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" ) lowerCamelCase__ : str = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" lowerCamelCase__ : Any = model(__lowerCamelCase )["last_hidden_state"] lowerCamelCase__ : List[str] = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) # compare the actual values for a slice. lowerCamelCase__ : str = tf.convert_to_tensor( [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
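# The integration test above pins a 3x3 slice of the hidden states and compares it
# with an absolute tolerance. A hedged sketch of that comparison pattern on toy
# arrays (values below are illustrative, not real model outputs):
import numpy as np

expected_slice = np.array([[-0.0254, 0.0235, 0.1027]])
actual = expected_slice + 5e-5  # pretend model output, within atol
assert np.allclose(actual, expected_slice, atol=1e-4)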
5
1
import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) A : List[Any] = logging.getLogger() def lowercase_ ( _A : Dict ): """simple docstring""" lowerCamelCase__ : Optional[int] = {} lowerCamelCase__ : Optional[Any] = os.path.join(_A , "all_results.json" ) if os.path.exists(_A ): with open(_A , "r" ) as f: lowerCamelCase__ : Optional[Any] = json.load(_A ) else: raise ValueError(F"can't find {path}" ) return results A : Tuple = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class _lowercase ( lowercase__): """simple docstring""" def lowerCAmelCase ( self : int ): '''simple docstring''' import xla_spawn lowerCamelCase__ : Optional[int] = self.get_auto_remove_tmp_dir() lowerCamelCase__ : str = f"\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split() with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): lowerCamelCase__ : List[str] = time() xla_spawn.main() lowerCamelCase__ : int = time() lowerCamelCase__ : int = get_results(__lowerCamelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start , 500 ) def lowerCAmelCase ( self : str ): '''simple docstring''' import xla_spawn lowerCamelCase__ : List[Any] = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split() with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ): xla_spawn.main()
5
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging A : List[Any] = logging.get_logger(__name__) A : Any = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class _lowercase ( lowercase__): """simple docstring""" A__ = "blenderbot-small" A__ = ["past_key_values"] A__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Dict , __lowerCamelCase : List[str]=50265 , __lowerCamelCase : str=512 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : str=2048 , __lowerCamelCase : str=16 , __lowerCamelCase : List[Any]=8 , __lowerCamelCase : Any=2048 , __lowerCamelCase : List[str]=16 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Dict=False , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=2 , **__lowerCamelCase : int , ): '''simple docstring''' lowerCamelCase__ : str = vocab_size lowerCamelCase__ : Union[str, Any] = max_position_embeddings lowerCamelCase__ : Union[str, Any] = d_model lowerCamelCase__ : Optional[int] = encoder_ffn_dim lowerCamelCase__ : Dict = encoder_layers lowerCamelCase__ : Any = encoder_attention_heads lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim lowerCamelCase__ : str = decoder_layers lowerCamelCase__ : Optional[Any] = decoder_attention_heads lowerCamelCase__ : List[str] = dropout lowerCamelCase__ : List[Any] = attention_dropout lowerCamelCase__ : Dict = activation_dropout lowerCamelCase__ : Optional[Any] = activation_function lowerCamelCase__ : Dict = init_std lowerCamelCase__ : List[str] = encoder_layerdrop lowerCamelCase__ : Dict = decoder_layerdrop lowerCamelCase__ : int = use_cache lowerCamelCase__ : List[Any] = encoder_layers lowerCamelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , ) class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : int = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ : Union[str, Any] = {0: "batch"} lowerCamelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowerCamelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"} lowerCamelCase__ : str = {0: "batch", 1: "decoder_sequence"} if self.use_past: 
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. lowerCamelCase__ : Tuple = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"} else: lowerCamelCase__ : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Union[str, Any] = super().outputs else: lowerCamelCase__ : int = super(__lowerCamelCase , self ).outputs if self.use_past: lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers for i in range(__lowerCamelCase ): lowerCamelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"} lowerCamelCase__ : Any = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def lowerCAmelCase ( self : int , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Generate decoder inputs lowerCamelCase__ : List[str] = seq_length if not self.use_past else 1 lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} lowerCamelCase__ : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : Tuple = common_inputs["input_ids"].shape lowerCamelCase__ : int = common_inputs["decoder_input_ids"].shape[1] lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.num_attention_heads lowerCamelCase__ : str = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Optional[int] = decoder_seq_length + 3 lowerCamelCase__ : Dict = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCamelCase__ : List[Any] = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Optional[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers lowerCamelCase__ : Union[str, Any] = min(__lowerCamelCase , __lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers lowerCamelCase__ : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(__lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase ), ) ) # TODO: test this. lowerCamelCase__ : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(__lowerCamelCase , __lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) ) return common_inputs def lowerCAmelCase ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch lowerCamelCase__ , lowerCamelCase__ : int = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCamelCase__ : str = seqlen + 2 lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.num_layers lowerCamelCase__ , lowerCamelCase__ : int = self.num_attention_heads lowerCamelCase__ : Tuple = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase__ : Union[str, Any] = common_inputs["attention_mask"].dtype lowerCamelCase__ : List[str] = torch.cat( [common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 ) lowerCamelCase__ : Tuple = [ (torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase ) ] return common_inputs def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowerCamelCase__ : str = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(__lowerCamelCase ) lowerCamelCase__ : Dict = compute_effective_axis_dimension( __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence lowerCamelCase__ : Optional[int] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCamelCase__ : Optional[Any] = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) elif self.task == "causal-lm": lowerCamelCase__ : Any = self._generate_dummy_inputs_for_causal_lm( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) else: lowerCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase ) return common_inputs def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowerCamelCase__ : Dict = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) else: lowerCamelCase__ : int = super(__lowerCamelCase , self )._flatten_past_key_values_( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
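# The dummy past_key_values built above follow the standard decoder-cache layout:
# one (key, value) tensor pair per layer, each shaped
# (batch, num_heads, past_sequence_length, hidden_size // num_heads).
# A minimal, hedged sketch with illustrative sizes (not tied to any real checkpoint):
import torch

batch, num_heads, past_len, hidden_size, num_layers = 2, 16, 7, 512, 8
head_dim = hidden_size // num_heads  # 32
cache_shape = (batch, num_heads, past_len, head_dim)
past_key_values = [
    (torch.zeros(cache_shape), torch.zeros(cache_shape)) for _ in range(num_layers)
]
print(past_key_values[0][0].shape)  # torch.Size([2, 16, 7, 32])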
5
1
from __future__ import annotations from math import gcd def lowercase_ ( _A : int , _A : int = 2 , _A : int = 1 , _A : int = 3 , ): """simple docstring""" if num < 2: raise ValueError("The input value cannot be less than 2" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(_A : int , _A : int , _A : int ) -> int: return (pow(_A , 2 ) + step) % modulus for _ in range(_A ): # These track the position within the cycle detection logic. lowerCamelCase__ : Dict = seed lowerCamelCase__ : Optional[Any] = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. lowerCamelCase__ : int = rand_fn(_A , _A , _A ) lowerCamelCase__ : Dict = rand_fn(_A , _A , _A ) lowerCamelCase__ : Optional[Any] = rand_fn(_A , _A , _A ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. lowerCamelCase__ : Union[str, Any] = gcd(hare - tortoise , _A ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. lowerCamelCase__ : List[str] = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse A : Tuple = argparse.ArgumentParser() parser.add_argument( "num", type=int, help="The value to find a divisor of", ) parser.add_argument( "--attempts", type=int, default=3, help="The number of attempts before giving up", ) A : int = parser.parse_args() A : Optional[int] = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(f'{args.num} is probably prime') else: A : Any = args.num // divisor print(f'{args.num} = {divisor} * {quotient}')
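# Hedged usage sketch for the factoring helper above. Note that in this dump the
# function definition carries an obfuscated name while its own CLI block calls it
# pollard_rho, so that is the name assumed here; 8051 = 83 * 97 is a classic example.
divisor = pollard_rho(8051)
if divisor is None:
    print("all attempts failed (the algorithm is probabilistic)")
else:
    print(f"8051 = {divisor} * {8051 // divisor}")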
5
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : int = logging.get_logger(__name__) A : Optional[int] = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "xmod" def __init__( self : int , __lowerCamelCase : Any=30522 , __lowerCamelCase : Any=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Any=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : str=2 , __lowerCamelCase : List[str]=0.0_2 , __lowerCamelCase : List[str]=1E-1_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str="absolute" , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Tuple=False , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str=("en_XX",) , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[int] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : List[Any] = num_attention_heads lowerCamelCase__ : Union[str, Any] = hidden_act lowerCamelCase__ : Optional[int] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : List[Any] = attention_probs_dropout_prob lowerCamelCase__ : Any = max_position_embeddings lowerCamelCase__ : List[Any] = type_vocab_size lowerCamelCase__ : int = initializer_range lowerCamelCase__ : Tuple = layer_norm_eps lowerCamelCase__ : Union[str, Any] = position_embedding_type lowerCamelCase__ : str = use_cache lowerCamelCase__ : Union[str, Any] = classifier_dropout lowerCamelCase__ : Any = pre_norm lowerCamelCase__ : Tuple = adapter_reduction_factor lowerCamelCase__ : Tuple = adapter_layer_norm lowerCamelCase__ : List[Any] = adapter_reuse_layer_norm lowerCamelCase__ : Dict = ln_before_adapter lowerCamelCase__ : List[Any] = list(__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = default_language class _lowercase ( 
lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : Dict = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : List[str] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
5
1
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A : List[str] = { "configuration_xmod": [ "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP", "XmodConfig", "XmodOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : int = [ "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST", "XmodForCausalLM", "XmodForMaskedLM", "XmodForMultipleChoice", "XmodForQuestionAnswering", "XmodForSequenceClassification", "XmodForTokenClassification", "XmodModel", "XmodPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
5
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _lowercase : """simple docstring""" def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=0.0_2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]=None , ): '''simple docstring''' lowerCamelCase__ : Tuple = parent lowerCamelCase__ : int = batch_size lowerCamelCase__ : List[Any] = seq_length lowerCamelCase__ : Union[str, Any] = is_training lowerCamelCase__ : Any = use_token_type_ids lowerCamelCase__ : Union[str, Any] = use_labels lowerCamelCase__ : List[str] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : List[Any] = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : str = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : List[str] = max_position_embeddings lowerCamelCase__ : Optional[int] = type_vocab_size lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : List[str] = initializer_range lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Optional[Any] = scope lowerCamelCase__ : List[Any] = self.vocab_size - 1 def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Any = None lowerCamelCase__ : str = None lowerCamelCase__ : str = None if self.use_labels: lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCamelCase__ : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, 
sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , *__lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = OpenAIGPTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : str = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTLMHeadModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTDoubleHeadsModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Optional[Any] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Dict = self.num_labels lowerCamelCase__ : Tuple = OpenAIGPTForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : str = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Any = config_and_inputs lowerCamelCase__ : Union[str, Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) A__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly A__ = ( { "feature-extraction": OpenAIGPTModel, 
"text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ): '''simple docstring''' lowerCamelCase__ : Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCamelCase__ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Tuple = inputs_dict["labels"] lowerCamelCase__ : Any = inputs_dict["labels"] lowerCamelCase__ : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 ) def lowerCAmelCase ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*__lowerCamelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Any = OpenAIGPTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(__lowerCamelCase ) lowerCamelCase__ : int = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__lowerCamelCase ) # the president is lowerCamelCase__ : Union[str, Any] = [ 481, 4735, 544, 246, 963, 870, 762, 
239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowerCamelCase__ : int = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase ) self.assertListEqual(output_ids[0].tolist() , __lowerCamelCase )
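# The slow test above checks greedy generation (do_sample=False) against a pinned
# token list. A minimal, hedged sketch of what greedy decoding does, with a toy
# logits function standing in for the model (no checkpoint is loaded here):
import torch


def toy_logits(input_ids, vocab_size=10):
    # Deterministic fake logits that always favor (last_token + 1) % vocab_size.
    last = input_ids[:, -1]
    logits = torch.zeros(input_ids.shape[0], vocab_size)
    logits[torch.arange(input_ids.shape[0]), (last + 1) % vocab_size] = 1.0
    return logits


ids = torch.tensor([[3]])
for _ in range(4):
    next_token = toy_logits(ids).argmax(dim=-1, keepdim=True)  # greedy pick
    ids = torch.cat([ids, next_token], dim=-1)
print(ids.tolist())  # [[3, 4, 5, 6, 7]]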
5
1
import pytest

DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n                \"ner_tags\": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            \"O\",\n                            \"B-PER\",\n                            \"I-PER\",\n                            \"B-ORG\",\n                            \"I-ORG\",\n                            \"B-LOC\",\n                            \"I-LOC\",\n                        ]\n                    )\n                ),\n                \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n                \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, \"r\", encoding=\"utf-8\") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n"


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
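# Hedged sketch of a test consuming the fixtures above: the directory fixture writes
# the loading script into tmp_path, so a test can check the file exists (actually
# loading the dataset would download the JSONL files, which this sketch avoids).
import os


def test_loading_script_is_written(dataset_loading_script_dir, dataset_loading_script_name):
    script_path = os.path.join(dataset_loading_script_dir, f"{dataset_loading_script_name}.py")
    assert os.path.isfile(script_path)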
5
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : Dict = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class _lowercase ( lowercase__): """simple docstring""" A__ = "ibert" def __init__( self : int , __lowerCamelCase : List[str]=30522 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : str=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Union[str, Any]=0.0_2 , __lowerCamelCase : Any=1E-1_2 , __lowerCamelCase : int=1 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : int=2 , __lowerCamelCase : int="absolute" , __lowerCamelCase : Tuple=False , __lowerCamelCase : Dict="none" , **__lowerCamelCase : Tuple , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Any = vocab_size lowerCamelCase__ : Optional[Any] = hidden_size lowerCamelCase__ : Optional[int] = num_hidden_layers lowerCamelCase__ : int = num_attention_heads lowerCamelCase__ : List[str] = hidden_act lowerCamelCase__ : List[str] = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : Any = type_vocab_size lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : Tuple = layer_norm_eps lowerCamelCase__ : int = position_embedding_type lowerCamelCase__ : List[str] = quant_mode lowerCamelCase__ : int = force_dequant class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : Any = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
5
1
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 A : str = get_tests_dir("fixtures") class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = mock.Mock() lowerCamelCase__ : Optional[Any] = 500 lowerCamelCase__ : Optional[Any] = {} lowerCamelCase__ : List[str] = HTTPError lowerCamelCase__ : Tuple = {} # Download this model to make sure it's in the cache. lowerCamelCase__ : Dict = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=__lowerCamelCase ) as mock_head: lowerCamelCase__ : List[Any] = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # This check we did call the fake head request mock_head.assert_called() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" ) @is_staging_test class _lowercase ( unittest.TestCase): """simple docstring""" @classmethod def lowerCAmelCase ( cls : Dict ): '''simple docstring''' lowerCamelCase__ : Tuple = TOKEN HfFolder.save_token(__lowerCamelCase ) @classmethod def lowerCAmelCase ( cls : List[str] ): '''simple docstring''' try: delete_repo(token=cls._token , repo_id="test-feature-extractor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" ) except HTTPError: pass def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(__lowerCamelCase ) feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token ) lowerCamelCase__ : str = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( __lowerCamelCase , repo_id="test-feature-extractor" , push_to_hub=__lowerCamelCase , use_auth_token=self._token ) lowerCamelCase__ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(__lowerCamelCase ) feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token ) lowerCamelCase__ : List[Any] = 
WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( __lowerCamelCase , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token ) lowerCamelCase__ : int = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' CustomFeatureExtractor.register_for_auto_class() lowerCamelCase__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(__lowerCamelCase ) feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , ) lowerCamelCase__ : Any = AutoFeatureExtractor.from_pretrained( f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=__lowerCamelCase ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
5
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Dict = logging.get_logger(__name__) A : Union[str, Any] = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "roberta" def __init__( self : int , __lowerCamelCase : Dict=50265 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : int=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : Optional[int]=1E-1_2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : int=0 , __lowerCamelCase : Any=2 , __lowerCamelCase : Tuple="absolute" , __lowerCamelCase : Tuple=True , __lowerCamelCase : str=None , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : List[Any] = vocab_size lowerCamelCase__ : str = hidden_size lowerCamelCase__ : int = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Optional[Any] = hidden_act lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : Tuple = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : int = type_vocab_size lowerCamelCase__ : Any = initializer_range lowerCamelCase__ : Dict = layer_norm_eps lowerCamelCase__ : int = position_embedding_type lowerCamelCase__ : Any = use_cache lowerCamelCase__ : int = classifier_dropout class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : int = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : Optional[Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
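# Each OnnxConfig above declares which input axes are dynamic ("batch", "sequence").
# A hedged sketch of how such a mapping is consumed by torch.onnx.export, using a
# toy module and output file name (illustrative only, not the transformers export path):
import torch


class TinyModel(torch.nn.Module):
    def forward(self, input_ids, attention_mask):
        return input_ids * attention_mask


dynamic_axes = {
    "input_ids": {0: "batch", 1: "sequence"},
    "attention_mask": {0: "batch", 1: "sequence"},
}
dummy = (torch.ones(1, 8, dtype=torch.long), torch.ones(1, 8, dtype=torch.long))
torch.onnx.export(
    TinyModel(),
    dummy,
    "tiny.onnx",
    input_names=["input_ids", "attention_mask"],
    output_names=["output"],
    dynamic_axes=dynamic_axes,  # these dims stay symbolic in the exported graph
)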
5
1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _lowercase : """simple docstring""" def __init__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[Any]=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=0.0_2 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]=None , ): '''simple docstring''' lowerCamelCase__ : Tuple = parent lowerCamelCase__ : int = batch_size lowerCamelCase__ : List[Any] = seq_length lowerCamelCase__ : Union[str, Any] = is_training lowerCamelCase__ : Any = use_token_type_ids lowerCamelCase__ : Union[str, Any] = use_labels lowerCamelCase__ : List[str] = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : List[Any] = num_hidden_layers lowerCamelCase__ : Optional[Any] = num_attention_heads lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : str = hidden_act lowerCamelCase__ : str = hidden_dropout_prob lowerCamelCase__ : Any = attention_probs_dropout_prob lowerCamelCase__ : List[str] = max_position_embeddings lowerCamelCase__ : Optional[int] = type_vocab_size lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : List[str] = initializer_range lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : List[Any] = num_choices lowerCamelCase__ : Optional[Any] = scope lowerCamelCase__ : List[Any] = self.vocab_size - 1 def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : Any = None lowerCamelCase__ : str = None lowerCamelCase__ : str = None if self.use_labels: lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : Union[str, Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCamelCase__ : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, 
sequence_labels, token_labels, choice_labels, ) def lowerCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , *__lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = OpenAIGPTModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Tuple = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , head_mask=__lowerCamelCase ) lowerCamelCase__ : str = model(__lowerCamelCase , token_type_ids=__lowerCamelCase ) lowerCamelCase__ : Optional[int] = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTLMHeadModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , *__lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTDoubleHeadsModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Optional[Any] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , *__lowerCamelCase : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Dict = self.num_labels lowerCamelCase__ : Tuple = OpenAIGPTForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : List[str] = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' lowerCamelCase__ : str = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Any = config_and_inputs lowerCamelCase__ : Union[str, Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class _lowercase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase): """simple docstring""" A__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) A__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly A__ = ( { "feature-extraction": OpenAIGPTModel, 
"text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ): '''simple docstring''' lowerCamelCase__ : Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCamelCase__ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Tuple = inputs_dict["labels"] lowerCamelCase__ : Any = inputs_dict["labels"] lowerCamelCase__ : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__lowerCamelCase , ) lowerCamelCase__ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = OpenAIGPTModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 ) def lowerCAmelCase ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*__lowerCamelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*__lowerCamelCase ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__lowerCamelCase ) @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : Any = OpenAIGPTModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_torch class _lowercase ( unittest.TestCase): """simple docstring""" @slow def lowerCAmelCase ( self : Any ): '''simple docstring''' lowerCamelCase__ : List[Any] = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(__lowerCamelCase ) lowerCamelCase__ : int = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__lowerCamelCase ) # the president is lowerCamelCase__ : Union[str, Any] = [ 481, 4735, 544, 246, 963, 870, 762, 
239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowerCamelCase__ : int = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase ) self.assertListEqual(output_ids[0].tolist() , __lowerCamelCase )
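# Hedged standalone sketch (not part of the test class above): the slow
# integration test checks greedy decoding against a fixed id list. A minimal
# equivalent, assuming network access to the "openai-gpt" checkpoint:
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt").eval()
tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
input_ids = tokenizer("the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)  # greedy, as in the test
print(tokenizer.decode(output_ids[0]))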
5
import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features A : Union[str, Any] = logging.get_logger(__name__) A : Union[str, Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) A : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _lowercase : """simple docstring""" A__ = field( default=lowercase__ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase__)}) A__ = field( default=lowercase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}) A__ = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) A__ = field( default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) A__ = field( default=64 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) A__ = field( default=30 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) } , ) A__ = field( default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"}) A__ = field( default=lowercase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}) A__ = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}) A__ = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"}) class _lowercase ( lowercase__): """simple docstring""" A__ = "train" A__ = "dev" class _lowercase ( lowercase__): """simple docstring""" A__ = 42 A__ = 42 A__ = 42 A__ = 42 def __init__( self : Optional[int] , __lowerCamelCase : SquadDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "pt" , ): '''simple docstring''' lowerCamelCase__ : List[str] = args lowerCamelCase__ : Tuple = is_language_sensitive lowerCamelCase__ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(__lowerCamelCase , __lowerCamelCase ): try: lowerCamelCase__ : List[str] = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) lowerCamelCase__ : str = mode # Load data features from cache or dataset file lowerCamelCase__ : Any = "v2" if args.version_2_with_negative else "v1" lowerCamelCase__ : List[str] = os.path.join( cache_dir if 
cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCamelCase__ : List[str] = cached_features_file + ".lock" with FileLock(__lowerCamelCase ): if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache: lowerCamelCase__ : str = time.time() lowerCamelCase__ : Tuple = torch.load(__lowerCamelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowerCamelCase__ : Optional[Any] = self.old_features["features"] lowerCamelCase__ : Optional[int] = self.old_features.get("dataset" , __lowerCamelCase ) lowerCamelCase__ : Optional[Any] = self.old_features.get("examples" , __lowerCamelCase ) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in" " future run" ) else: if mode == Split.dev: lowerCamelCase__ : List[Any] = self.processor.get_dev_examples(args.data_dir ) else: lowerCamelCase__ : str = self.processor.get_train_examples(args.data_dir ) lowerCamelCase__ , lowerCamelCase__ : Tuple = squad_convert_examples_to_features( examples=self.examples , tokenizer=__lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCamelCase , ) lowerCamelCase__ : int = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__( self : List[Any] ): '''simple docstring''' return len(self.features ) def __getitem__( self : List[str] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' lowerCamelCase__ : Tuple = self.features[i] lowerCamelCase__ : Tuple = torch.tensor(feature.input_ids , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long ) lowerCamelCase__ : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.cls_index , dtype=torch.long ) lowerCamelCase__ : Any = torch.tensor(feature.p_mask , dtype=torch.float ) lowerCamelCase__ : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float ) lowerCamelCase__ : List[str] = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask} ) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible} ) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowerCamelCase__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long ) lowerCamelCase__ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"start_positions": start_positions, "end_positions": end_positions} ) return inputs
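# Hedged usage sketch, assuming the unobfuscated transformers equivalents of
# the classes above (SquadDataTrainingArguments / SquadDataset) and a local
# ./squad directory containing the SQuAD json files:
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.data.datasets import SquadDataset, SquadDataTrainingArguments

args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
train_dataset = SquadDataset(args, tokenizer=tokenizer, mode="train")
loader = DataLoader(train_dataset, batch_size=8)
batch = next(iter(loader))  # dict with input_ids / attention_mask / positions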
5
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Tuple = logging.get_logger(__name__) A : Optional[int] = { "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "data2vec-text" def __init__( self : Optional[Any] , __lowerCamelCase : Dict=30522 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Union[str, Any]=12 , __lowerCamelCase : Dict=12 , __lowerCamelCase : str=3072 , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : List[Any]=512 , __lowerCamelCase : Any=2 , __lowerCamelCase : Optional[Any]=0.0_2 , __lowerCamelCase : Union[str, Any]=1E-1_2 , __lowerCamelCase : Tuple=1 , __lowerCamelCase : int=0 , __lowerCamelCase : Any=2 , __lowerCamelCase : List[str]="absolute" , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = vocab_size lowerCamelCase__ : List[str] = hidden_size lowerCamelCase__ : Tuple = num_hidden_layers lowerCamelCase__ : Union[str, Any] = num_attention_heads lowerCamelCase__ : Tuple = hidden_act lowerCamelCase__ : Union[str, Any] = intermediate_size lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob lowerCamelCase__ : Tuple = attention_probs_dropout_prob lowerCamelCase__ : str = max_position_embeddings lowerCamelCase__ : Optional[Any] = type_vocab_size lowerCamelCase__ : Optional[Any] = initializer_range lowerCamelCase__ : str = layer_norm_eps lowerCamelCase__ : Union[str, Any] = position_embedding_type lowerCamelCase__ : List[Any] = use_cache lowerCamelCase__ : int = classifier_dropout class _lowercase ( lowercase__): """simple docstring""" @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if self.task == "multiple-choice": lowerCamelCase__ : Optional[Any] = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase__ : Optional[Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
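# Hedged sketch: the config above follows the usual PretrainedConfig pattern;
# the upstream class is Data2VecTextConfig, instantiable with keyword overrides.
from transformers import Data2VecTextConfig

config = Data2VecTextConfig(num_hidden_layers=6, hidden_size=384)
print(config.model_type)          # "data2vec-text"
print(config.num_hidden_layers)   # 6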
5
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging A : Tuple = logging.get_logger(__name__) A : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all LED models at https://huggingface.co/models?filter=LED A : int = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } A : Union[str, Any] = { "allenai/led-base-16384": 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase_ ( ): """simple docstring""" lowerCamelCase__ : str = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) lowerCamelCase__ : Any = bs[:] lowerCamelCase__ : Union[str, Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(_A ) cs.append(2**8 + n ) n += 1 lowerCamelCase__ : Any = [chr(_A ) for n in cs] return dict(zip(_A , _A ) ) def lowercase_ ( _A : Any ): """simple docstring""" lowerCamelCase__ : Union[str, Any] = set() lowerCamelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase__ : Any = char return pairs class _lowercase ( lowercase__): """simple docstring""" A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ["input_ids", "attention_mask"] def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Optional[Any] , ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token lowerCamelCase__ : Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token lowerCamelCase__ : str = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token lowerCamelCase__ : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token lowerCamelCase__ : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase__ : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token super().__init__( errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , ) with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle: lowerCamelCase__ : Union[str, Any] = json.load(__lowerCamelCase ) lowerCamelCase__ : List[str] = {v: k for k, v in self.encoder.items()} lowerCamelCase__ : Union[str, Any] = errors # how to handle errors in decoding lowerCamelCase__ : List[Any] = bytes_to_unicode() lowerCamelCase__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle: lowerCamelCase__ : List[Any] = merges_handle.read().split("\n" )[1:-1] lowerCamelCase__ : str = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase__ : Optional[Any] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) lowerCamelCase__ : List[Any] = {} lowerCamelCase__ : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase__ : List[str] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return len(self.encoder ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Dict ): '''simple docstring''' if token in self.cache: return self.cache[token] lowerCamelCase__ : Union[str, Any] = tuple(__lowerCamelCase ) lowerCamelCase__ : Tuple = get_pairs(__lowerCamelCase ) if not pairs: return token while True: lowerCamelCase__ : str = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = bigram lowerCamelCase__ : int = [] lowerCamelCase__ : int = 0 while i < len(__lowerCamelCase ): try: lowerCamelCase__ : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase__ : List[str] = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase__ : Dict = tuple(__lowerCamelCase ) lowerCamelCase__ : str = new_word if len(__lowerCamelCase ) == 1: break else: lowerCamelCase__ : List[str] = get_pairs(__lowerCamelCase ) lowerCamelCase__ : Optional[int] = " ".join(__lowerCamelCase ) lowerCamelCase__ : Dict = word return word def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[Any] = [] for token in re.findall(self.pat , __lowerCamelCase ): lowerCamelCase__ : Union[str, Any] = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in 
self.bpe(__lowerCamelCase ).split(" " ) ) return bpe_tokens def lowerCAmelCase ( self : Any , __lowerCamelCase : int ): '''simple docstring''' return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' return self.decoder.get(__lowerCamelCase ) def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Tuple ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = "".join(__lowerCamelCase ) lowerCamelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCamelCase__ : List[Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase__ : Union[str, Any] = os.path.join( __lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" ) lowerCamelCase__ : Tuple = 0 with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" 
) lowerCamelCase__ : List[Any] = token_index writer.write(" ".join(__lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase__ : List[str] = [self.cls_token_id] lowerCamelCase__ : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowerCamelCase__ : Any = [self.sep_token_id] lowerCamelCase__ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict=False , **__lowerCamelCase : List[str] ): '''simple docstring''' lowerCamelCase__ : Union[str, Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()): lowerCamelCase__ : Dict = " " + text return (text, kwargs) def lowerCAmelCase ( self : Dict , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ): '''simple docstring''' lowerCamelCase__ : str = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: lowerCamelCase__ : str = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCamelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCamelCase__ : Union[str, Any] = len(encoded_inputs["global_attention_mask"] ) != len(__lowerCamelCase ) if needs_to_be_padded: lowerCamelCase__ : Dict = len(__lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCamelCase__ : Optional[int] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowerCamelCase__ : Union[str, Any] = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
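# Hedged sketch of the global-attention padding convention implemented by
# _pad above: 1 marks globally attended tokens, 0 local ones, and padded
# positions are filled with -1 (checkpoint name as used in the file):
from transformers import LEDTokenizer

tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = tok("hello world")
enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
padded = tok.pad(enc, padding="max_length", max_length=16)
print(padded["global_attention_mask"])  # tail is padded with -1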
5
1
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    data = load_iris()
    features, targets = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = data["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
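# Hedged add-on: a quick accuracy check on the held-out split, using only
# public sklearn/xgboost APIs (not part of the original script).
def quick_score() -> float:
    features, targets = data_handling(load_iris())
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    return xgboost(x_train, y_train).score(x_test, y_test)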
5
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowercase ( lowercase__ , unittest.TestCase): """simple docstring""" A__ = KandinskyVaaImgaImgPipeline A__ = ["image_embeds", "negative_image_embeds", "image"] A__ = [ "image_embeds", "negative_image_embeds", "image", ] A__ = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] A__ = False @property def lowerCAmelCase ( self : Dict ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return 32 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return 100 @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : Optional[Any] = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } lowerCamelCase__ : Tuple = UNetaDConditionModel(**__lowerCamelCase ) return model @property def lowerCAmelCase ( self : int ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase__ : int = VQModel(**self.dummy_movq_kwargs ) return model def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__ : List[str] = self.dummy_unet lowerCamelCase__ : Optional[Any] = self.dummy_movq lowerCamelCase__ : Optional[int] = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.0_0_0_8_5, "beta_end": 0.0_1_2, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } lowerCamelCase__ : List[Any] = DDIMScheduler(**__lowerCamelCase ) lowerCamelCase__ : Tuple = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str , 
__lowerCamelCase : int=0 ): '''simple docstring''' lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowerCamelCase ) # create init_image lowerCamelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) lowerCamelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ : Optional[int] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((256, 256) ) if str(__lowerCamelCase ).startswith("mps" ): lowerCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase ) else: lowerCamelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) lowerCamelCase__ : Tuple = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def lowerCAmelCase ( self : int ): '''simple docstring''' lowerCamelCase__ : Dict = "cpu" lowerCamelCase__ : str = self.get_dummy_components() lowerCamelCase__ : Optional[int] = self.pipeline_class(**__lowerCamelCase ) lowerCamelCase__ : List[str] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) ) lowerCamelCase__ : List[str] = output.images lowerCamelCase__ : Optional[Any] = pipe( **self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0] lowerCamelCase__ : int = image[0, -3:, -3:, -1] lowerCamelCase__ : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase__ : str = np.array( [0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class _lowercase ( unittest.TestCase): """simple docstring""" def lowerCAmelCase ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' lowerCamelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy" ) lowerCamelCase__ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) lowerCamelCase__ : Any = "A red cartoon frog, 4k" lowerCamelCase__ : str = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(__lowerCamelCase ) lowerCamelCase__ : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa ) lowerCamelCase__ : str = pipeline.to(__lowerCamelCase ) pipeline.set_progress_bar_config(disable=__lowerCamelCase ) lowerCamelCase__ : Tuple = 
torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase__ , lowerCamelCase__ : List[str] = pipe_prior( __lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() lowerCamelCase__ : Optional[Any] = pipeline( image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , ) lowerCamelCase__ : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
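# Hedged sketch of the two-stage Kandinsky 2.2 flow the slow test exercises:
# the prior maps text to image embeddings, the decoder runs img2img on them
# (public diffusers class names; checkpoints as in the test above).
import torch
from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
)
decoder = KandinskyV22Img2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
)
image_embeds, negative_embeds = prior("A red cartoon frog, 4k").to_tuple()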
5
1
from math import factorial

DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    limit = 7 * factorial(9) + 1  # upper bound: beyond 7 * 9! no number can equal its digit-factorial sum
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
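# Hedged check: 145 and 40585 are the two curious numbers the solver sums
# (1! + 4! + 5! = 145, and 4! + 0! + 5! + 8! + 5! = 40585).
assert sum_of_digit_factorial(145) == 145
assert sum_of_digit_factorial(40585) == 40585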
5
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
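# Hedged usage sketch: binary_xor agrees with Python's ^ operator up to the
# zero padding of the shorter operand.
assert binary_xor(25, 32) == "0b111001"
assert binary_xor(25, 32) == "0b" + format(25 ^ 32, "b").zfill(6)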
5
1
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
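# Hedged usage sketch: with 0/1 edge weights, the deque-based BFS above
# returns the same distances Dijkstra would, in O(V + E).
graph = AdjacencyList(4)
graph.add_edge(0, 1, 0)
graph.add_edge(1, 2, 1)
graph.add_edge(2, 3, 0)
assert graph.get_shortest_path(0, 3) == 1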
5
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
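# Hedged usage note: the loader JIT-compiles the kernels on first call;
# callers typically guard it so a missing CUDA toolchain falls back to the
# pure-PyTorch attention path.
try:
    MSDA = load_cuda_kernels()
except Exception:
    MSDA = None  # fall back to the PyTorch implementation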
5
1