import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
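# A quick sanity sketch of the renaming above (the hypernetwork key is hypothetical;
# note that replace_keys also expects the shared positional-embedding key to exist):
#
#     sample = {
#         "prompt_encoder.shared_embedding.positional_embedding": torch.zeros(2, 2),
#         "mask_decoder.output_hypernetworks_mlps.0.layers.2.weight": torch.zeros(1),
#     }
#     renamed = replace_keys(sample)
#     assert "mask_decoder.output_hypernetworks_mlps.0.proj_out.weight" in renamed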
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Name of the original SAM checkpoint to convert.",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        type=str,
        help="Hub repo id that hosts the original checkpoints.",
    )

    args = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer) -> None:
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return list(dict.fromkeys(self.tokenizer.model_input_names + self.image_processor.model_input_names))
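# A minimal usage sketch (hedged: the checkpoint id is illustrative, and the relative
# imports above assume this file lives inside the transformers package):
#
#     from transformers import Pix2StructProcessor
#     from PIL import Image
#
#     processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#     inputs = processor(images=Image.open("page.png"), text="A caption", return_tensors="pt")
#     # -> dict with "flattened_patches", "attention_mask", and decoder_* text fields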
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
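# Quick numerical sanity sketch (illustrative only): with the cosine transform,
# betas start small and grow toward max_beta, and every beta lies in (0, 1):
#
#     betas = betas_for_alpha_bar(1000)
#     assert betas.shape == (1000,)
#     assert float(betas.min()) > 0 and float(betas.max()) <= 0.999
#     assert betas[0] < betas[-1]  # the schedule increases over the trajectory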
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
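# A minimal denoising-loop sketch for this scheduler (illustrative only; `unet`
# stands in for any noise-prediction model with the usual (sample, t) call signature):
#
#     scheduler = KDPM2DiscreteScheduler()
#     scheduler.set_timesteps(25, device="cpu")
#     sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample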
from manim import *


class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(step_1)

        first_animations = []
        second_animations = []
        cpu_targs = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=RIGHT, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # Does any neighbour already use this color?
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
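# Illustrative usage on a small adjacency matrix (a 4-cycle is 2-colorable,
# while giving it only 1 color should fail and return []):
if __name__ == "__main__":
    cycle = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(cycle, 2))  # e.g. [0, 1, 0, 1]
    print(color(cycle, 1))  # []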
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan over the remaining vertices: O(mn)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a binary heap: O((m + n) log n)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
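# Illustrative usage (assumes 1-indexed vertex ids as in `connect`): build a
# triangle with one heavy edge; both implementations should pick the two light
# edges for the minimum spanning tree.
#
#     graph = [Vertex(i) for i in range(3)]
#     connect(graph, 1, 2, 1)
#     connect(graph, 2, 3, 1)
#     connect(graph, 1, 3, 5)
#     print(prim(graph, graph[0]))             # [(2, 1), (3, 2)]
#     print(list(prim_heap(graph, graph[0])))  # same edges, heap-based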
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """The smallest number generated is 0; the largest is modulo - 1."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
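# Illustrative note: with a fixed seed the stream is fully deterministic, e.g.
#
#     lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=0)
#     lcg.next_number()  # 1013904223
#
# These multiplier/increment values are the classic Numerical Recipes constants;
# the period is maximal (the full modulus) because they satisfy the Hull-Dobell
# conditions for a power-of-two modulus.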
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
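# A hedged sketch of how this mixin is typically wired into a concrete test case
# (class name and dict contents below are hypothetical):
#
#     class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#         feature_extraction_class = MyFeatureExtractor
#         feat_extract_dict = {"feature_size": 80, "sampling_rate": 16000}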
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
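# Illustrative examples of the three wildcard styles handled above (pattern
# strings here are hypothetical, not taken from IGNORE_KEYS):
#
#     should_ignore("encoder.layers.0.conv.bias", ["encoder.*"])               # True: prefix wildcard
#     should_ignore("quantizer.layers.3.codebook", ["quantizer.*.codebook"])   # True: infix wildcard
#     should_ignore("decoder.layers.0.conv", ["lstm"])                         # False: plain substring miss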
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    # note: the original flattened code had the classic truthy bug
    # `model_name == "encodec_24khz" or "encodec_32khz"`, fixed here
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
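# A hedged usage sketch: the config can be built directly and paired with its
# ONNX export config (values shown mirror the defaults above):
#
#     config = SegformerConfig(depths=[2, 2, 2, 2], hidden_sizes=[32, 64, 160, 256])
#     onnx_config = SegformerOnnxConfig(config)
#     list(onnx_config.inputs)  # ["pixel_values"]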
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse


CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
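# Illustrative effect on the JS file (script filename and version are hypothetical):
#
#     python update_custom_js.py --version 4.30.0
#
# rewrites the line `const stableVersion = "v4.30.0"` and appends
# `"v4.30.0": "v4.30.0",` as the last entry of the `versionMapping` object.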
from ..utils import DummyObject, requires_backends


# The original class name is not recoverable from this dump; the placeholder name
# below is hypothetical. The body follows the standard dummy-object pattern used
# for missing optional backends.
class _TorchAndScipyObject(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
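# Usage sketch: any attempt to instantiate the placeholder raises an ImportError
# explaining which backends are missing, e.g.
#
#     obj = _TorchAndScipyObject()  # ImportError: ... requires the torch and scipy libraries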
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is an empirically determined constant in [0.04, 0.06];
        # window_size is the neighbourhood considered around each pixel.
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # was hardcoded to 0.04 in the original, shadowing the constructor argument
        offset = self.window_size // 2

        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)

        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
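# Note on the threshold: `r` is the Harris response det(M) - k * trace(M)^2; the
# fixed cutoff of 0.5 above is arbitrary, and real images usually need a
# data-dependent threshold, e.g. keeping responses above a fraction of the max:
#
#     responses = [r for _, _, r in corner_list]
#     strong = [c for c in corner_list if c[2] > 0.01 * max(responses)]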
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """A conv block that bundles conv/norm/activation layers."""

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
            padding=padding, bias=bias, dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing head based on an FPN plus a Pyramid Pooling Module
    applied to the last feature map of the backbone.
    """

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)

        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    """Fully convolutional auxiliary head applied to one backbone feature map."""

    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation)
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation)
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config) -> None:
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length" ) )
@replace_return_docstrings(output_type=__A , config_class=_CONFIG_FOR_DOC )
def __lowerCAmelCase ( self , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , ) -> Union[tuple, SemanticSegmenterOutput]:
_a = return_dict if return_dict is not None else self.config.use_return_dict
_a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a = output_attentions if output_attentions is not None else self.config.output_attentions
_a = self.backbone.forward_with_filtered_kwargs(
__A , output_hidden_states=__A , output_attentions=__A )
_a = outputs.feature_maps
_a = self.decode_head(__A )
_a = nn.functional.interpolate(__A , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=__A )
_a = None
if self.auxiliary_head is not None:
_a = self.auxiliary_head(__A )
_a = nn.functional.interpolate(
__A , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=__A )
_a = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("The number of labels should be greater than one" )
else:
# compute weighted loss
_a = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
_a = loss_fct(__A , __A )
_a = loss_fct(__A , __A )
_a = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
_a = (logits,) + outputs[1:]
else:
_a = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__A , logits=__A , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
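# For illustration: a minimal, self-contained sketch of the weighted auxiliary-loss scheme
# used in the forward pass above. Both heads are scored with CrossEntropyLoss and the
# auxiliary term is scaled by a weight; the shapes and the 0.4 weight below are assumptions.
import torch
from torch.nn import CrossEntropyLoss

def combined_segmentation_loss(main_logits, aux_logits, labels, aux_weight=0.4, ignore_index=255):
    # logits: (batch, num_labels, height, width); labels: (batch, height, width)
    loss_fct = CrossEntropyLoss(ignore_index=ignore_index)
    return loss_fct(main_logits, labels) + aux_weight * loss_fct(aux_logits, labels)

logits = torch.randn(2, 19, 64, 64)
aux_logits = torch.randn(2, 19, 64, 64)
labels = torch.randint(0, 19, (2, 64, 64))
print(combined_segmentation_loss(logits, aux_logits, labels))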
| 718 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = ProphetNetTokenizer
__UpperCAmelCase : Optional[Any] = False
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
_a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __lowerCAmelCase ( self , snake_case_ ) -> Any:
_a = "UNwant\u00E9d,running"
_a = "unwanted, running"
return input_text, output_text
def __lowerCAmelCase ( self ) -> Any:
_a = self.tokenizer_class(self.vocab_file )
_a = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> int:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_a = {}
for i, token in enumerate(snake_case_ ):
_a = i
_a = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_a = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
_a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
_a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ )
_a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
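# For reference: a minimal re-implementation sketch of the greedy longest-match-first
# WordPiece algorithm that the WordpieceTokenizer tests above exercise (a simplification,
# not the library's exact code). Unknown spans collapse to "[UNK]" and non-initial
# subwords carry the "##" prefix.
def wordpiece_tokenize(text, vocab, unk_token="[UNK]"):
    output = []
    for word in text.split():
        start, sub_tokens, is_bad = 0, [], False
        while start < len(word):
            end, cur = len(word), None
            while start < end:
                piece = ("##" if start > 0 else "") + word[start:end]
                if piece in vocab:
                    cur = piece
                    break
                end -= 1
            if cur is None:
                is_bad = True
                break
            sub_tokens.append(cur)
            start = end
        output.extend([unk_token] if is_bad else sub_tokens)
    return output

vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(wordpiece_tokenize("unwanted running", vocab))   # ['un', '##want', '##ed', 'runn', '##ing']
print(wordpiece_tokenize("unwantedX running", vocab))  # ['[UNK]', 'runn', '##ing']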
| 691 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( __snake_case , unittest.TestCase ):
__UpperCAmelCase : Tuple = LEDTokenizer
__UpperCAmelCase : Optional[int] = LEDTokenizerFast
__UpperCAmelCase : Dict = True
def __lowerCAmelCase ( self ) -> Any:
super().setUp()
_a = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_a = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
_a = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_a = {"""unk_token""": """<unk>"""}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_lowercase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_lowercase ) )
def __lowerCAmelCase ( self , **snake_case_ ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )
def __lowerCAmelCase ( self , **snake_case_ ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )
def __lowerCAmelCase ( self , snake_case_ ) -> Dict:
return "lower newer", "lower newer"
@cached_property
def __lowerCAmelCase ( self ) -> Optional[Any]:
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def __lowerCAmelCase ( self ) -> List[str]:
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def __lowerCAmelCase ( self ) -> int:
_a = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_a = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(_lowercase , max_length=len(_lowercase ) , padding=_lowercase , return_tensors="pt" )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_a = batch.input_ids.tolist()[0]
self.assertListEqual(_lowercase , _lowercase )
@require_torch
def __lowerCAmelCase ( self ) -> List[str]:
_a = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(_lowercase , padding=_lowercase , return_tensors="pt" )
self.assertIn("input_ids" , _lowercase )
self.assertIn("attention_mask" , _lowercase )
self.assertNotIn("labels" , _lowercase )
self.assertNotIn("decoder_attention_mask" , _lowercase )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(text_target=_lowercase , max_length=3_2 , padding="max_length" , return_tensors="pt" )
self.assertEqual(3_2 , targets["input_ids"].shape[1] )
@require_torch
def __lowerCAmelCase ( self ) -> Any:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(
["I am a small frog" * 1_0_2_4, "I am a small frog"] , padding=_lowercase , truncation=_lowercase , return_tensors="pt" )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def __lowerCAmelCase ( self ) -> Dict:
_a = ["""A long paragraph for summarization."""]
_a = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(_lowercase , return_tensors="pt" )
_a = tokenizer(text_target=_lowercase , return_tensors="pt" )
_a = inputs["""input_ids"""]
_a = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = ["""Summary of the text.""", """Another summary."""]
_a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_a = tokenizer(_lowercase , padding=_lowercase )
_a = [[0] * len(_lowercase ) for x in encoded_output["""input_ids"""]]
_a = tokenizer.pad(_lowercase )
self.assertSequenceEqual(outputs["global_attention_mask"] , _lowercase )
def __lowerCAmelCase ( self ) -> Dict:
pass
def __lowerCAmelCase ( self ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_a = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
_a = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
_a = """A, <mask> AllenNLP sentence."""
_a = tokenizer_r.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
_a = tokenizer_p.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
_lowercase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
_lowercase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 719 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _lowercase ( ):
_a = argparse.ArgumentParser()
parser.add_argument("--model_ckpt", type=lowerCamelCase__, default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs", type=lowerCamelCase__, default=5 )
parser.add_argument("--batch_size", type=lowerCamelCase__, default=6 )
parser.add_argument("--gradient_accumulation_steps", type=lowerCamelCase__, default=1 )
parser.add_argument("--freeze", type=lowerCamelCase__, default=lowerCamelCase__ )
parser.add_argument("--learning_rate", type=lowerCamelCase__, default=5e-4 )
parser.add_argument("--seed", type=lowerCamelCase__, default=0 )
parser.add_argument("--lr_scheduler_type", type=lowerCamelCase__, default="cosine" )
parser.add_argument("--num_warmup_steps", type=lowerCamelCase__, default=10 )
parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 )
parser.add_argument("--output_dir", type=lowerCamelCase__, default="./results" )
return parser.parse_args()
__snake_case : str = load("accuracy")
def _lowercase ( lowerCamelCase__ : List[str] ):
_a , _a = eval_pred
_a = np.argmax(lowerCamelCase__, axis=1 )
return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ )
class A ( a ):
def __init__( self , snake_case_ ) -> None:
super().__init__()
_a = trainer
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]:
if control.should_evaluate:
_a = deepcopy(snake_case_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def _lowercase ( ):
_a = get_args()
set_seed(args.seed )
_a = load_dataset("codeparrot/codecomplex", split="train" )
_a = dataset.train_test_split(test_size=0.2 )
_a = train_test["test"].train_test_split(test_size=0.5 )
_a = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
_a = AutoTokenizer.from_pretrained(args.model_ckpt )
_a = tokenizer.eos_token
_a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
_a = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_a = False
_a = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(lowerCamelCase__ : Tuple ):
_a = tokenizer(example["src"], truncation=lowerCamelCase__, max_length=1_024 )
_a = labels.str2int(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_a = train_test_validation.map(
lowerCamelCase__, batched=lowerCamelCase__, remove_columns=train_test_validation["train"].column_names, )
_a = DataCollatorWithPadding(tokenizer=lowerCamelCase__ )
_a = TrainingArguments(
output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
_a = Trainer(
model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, )
print("Training..." )
trainer.add_callback(CustomCallback(lowerCamelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
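# For reference: a dependency-free sketch of what the `compute_metrics` callback above
# does — take the argmax over the class axis of the logits and compare against the
# reference labels.
import numpy as np

def accuracy_from_logits(logits, references):
    predictions = np.argmax(logits, axis=1)
    return {"accuracy": float((predictions == np.asarray(references)).mean())}

example_logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
print(accuracy_from_logits(example_logits, [1, 0, 0]))  # {'accuracy': 0.666...}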
| 691 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Any = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class A ( a ):
__UpperCAmelCase : Tuple = '''table-transformer'''
__UpperCAmelCase : Optional[int] = ['''past_key_values''']
__UpperCAmelCase : List[Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , snake_case_=True , snake_case_=None , snake_case_=3 , snake_case_=1_0_0 , snake_case_=6 , snake_case_=2_0_4_8 , snake_case_=8 , snake_case_=6 , snake_case_=2_0_4_8 , snake_case_=8 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=True , snake_case_="relu" , snake_case_=2_5_6 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=1.0 , snake_case_=False , snake_case_="sine" , snake_case_="resnet50" , snake_case_=True , snake_case_=False , snake_case_=1 , snake_case_=5 , snake_case_=2 , snake_case_=1 , snake_case_=1 , snake_case_=5 , snake_case_=2 , snake_case_=0.1 , **snake_case_ , ) -> Any:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_a = CONFIG_MAPPING["""resnet"""](out_features=["stage4"] )
elif isinstance(A__ , A__ ):
_a = backbone_config.get("model_type" )
_a = CONFIG_MAPPING[backbone_model_type]
_a = config_class.from_dict(A__ )
# set timm attributes to None
_a = None, None, None
_a = use_timm_backbone
_a = backbone_config
_a = num_channels
_a = num_queries
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = init_xavier_std
_a = encoder_layerdrop
_a = decoder_layerdrop
_a = encoder_layers
_a = auxiliary_loss
_a = position_embedding_type
_a = backbone
_a = use_pretrained_backbone
_a = dilation
# Hungarian matcher
_a = class_cost
_a = bbox_cost
_a = giou_cost
# Loss coefficients
_a = mask_loss_coefficient
_a = dice_loss_coefficient
_a = bbox_loss_coefficient
_a = giou_loss_coefficient
_a = eos_coefficient
super().__init__(is_encoder_decoder=A__ , **A__ )
@property
def __lowerCAmelCase ( self ) -> Dict:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) -> Tuple:
return self.d_model
class A ( a ):
__UpperCAmelCase : Dict = version.parse("""1.11""" )
@property
def __lowerCAmelCase ( self ) -> str:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def __lowerCAmelCase ( self ) -> Any:
return 1E-5
@property
def __lowerCAmelCase ( self ) -> Tuple:
return 1_2
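# For illustration: a toy sketch of the attribute aliasing that `attribute_map` enables
# above, so `hidden_size` resolves to `d_model`. This mirrors the idea only; the real
# PretrainedConfig machinery is more involved.
class AliasedConfig:
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads"}

    def __init__(self, d_model=256, encoder_attention_heads=8):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads

    def __getattr__(self, name):
        # only reached when normal lookup fails, so aliases fall through to the real attribute
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

cfg = AliasedConfig()
print(cfg.hidden_size, cfg.num_attention_heads)  # 256 8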
| 720 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict, lowerCamelCase__ : List[str] ):
_a = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
_a = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_a = F'''{src_lang}-{tgt_lang}'''
_a = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and instead measure the score on tokenized outputs. The `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ )
_a = os.path.join(lowerCamelCase__, "README.md" )
print(F'''Generating {path}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(lowerCamelCase__ )
# make sure we are under the root of the project
__snake_case : int = Path(__file__).resolve().parent.parent.parent
__snake_case : int = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__snake_case : Any = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
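# For reference: the BibTeX section of the template above relies on f-string brace
# escaping — doubled braces emit literal braces while single braces interpolate.
# A tiny standalone example:
model_name = "wmt16-en-de-dist-12-1"
print(f"@misc{{kasai2020deep, note = {{{model_name}}}}}")
# -> @misc{kasai2020deep, note = {wmt16-en-de-dist-12-1}}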
| 691 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__snake_case : Dict = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
__snake_case : List[str] = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
__snake_case : Dict = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class A ( __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES
__UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : Dict = ['input_ids', 'attention_mask']
__UpperCAmelCase : Optional[int] = DistilBertTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ) -> List[str]:
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , tokenize_chinese_chars=_a , strip_accents=_a , **_a , )
_a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , _a ) != do_lower_case
or normalizer_state.get("strip_accents" , _a ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , _a ) != tokenize_chinese_chars
):
_a = getattr(_a , normalizer_state.pop("type" ) )
_a = do_lower_case
_a = strip_accents
_a = tokenize_chinese_chars
_a = normalizer_class(**_a )
_a = do_lower_case
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> List[str]:
_a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> str:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Union[str, Any]:
_a = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
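# For reference: a plain-Python sketch of the segment-id layout that
# `create_token_type_ids_from_sequences` above implements — segment 0 covers
# [CLS] + sentence A + [SEP], and segment 1 covers sentence B + [SEP]
# (the lengths below are illustrative).
def token_type_ids(len_a, len_b=None):
    first = [0] * (1 + len_a + 1)      # [CLS] + A + [SEP]
    if len_b is None:
        return first
    return first + [1] * (len_b + 1)   # B + [SEP]

print(token_type_ids(3))     # [0, 0, 0, 0, 0]
print(token_type_ids(3, 2))  # [0, 0, 0, 0, 0, 1, 1, 1]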
| 721 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
__snake_case : str = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
__snake_case : Dict = {
"RUCAIBox/mvp": 1024,
}
class A ( a ):
__UpperCAmelCase : int = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[Any] = MvpTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]:
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = getattr(snake_case_ , pre_tok_state.pop("type" ) )
_a = add_prefix_space
_a = pre_tok_class(**snake_case_ )
_a = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_a = "post_processor"
_a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state["sep"] )
if "cls" in state:
_a = tuple(state["cls"] )
_a = False
if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get("trim_offsets" , snake_case_ ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(snake_case_ , state.pop("type" ) )
_a = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
_a = value
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Optional[Any]:
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
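# For reference: an illustrative sketch of the BART-style special-token layout the two
# methods above implement — a single sequence becomes `<s> A </s>` and a pair becomes
# `<s> A </s></s> B </s>` (the bos/eos ids below are assumptions for the example).
def with_special_tokens(ids_a, ids_b=None, bos=0, eos=2):
    out = [bos] + ids_a + [eos]
    if ids_b is not None:
        out += [eos] + ids_b + [eos]
    return out

print(with_special_tokens([10, 11]))        # [0, 10, 11, 2]
print(with_special_tokens([10, 11], [12]))  # [0, 10, 11, 2, 2, 12, 2]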
| 691 | 0 |
from __future__ import annotations
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : list[str] | None = None ):
_a = word_bank or []
# create a table
_a = len(_lowerCamelCase ) + 1
_a = []
for _ in range(_lowerCamelCase ):
table.append([] )
# seed value
_a = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(_lowerCamelCase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(_lowerCamelCase )] == word:
_a = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(_lowerCamelCase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(_lowerCamelCase )]:
combination.reverse()
return table[len(_lowerCamelCase )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
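# For comparison: the same dynamic program in memoized-recursion form, counting the
# combinations instead of enumerating them (a companion sketch, not part of the original).
from functools import lru_cache

def count_construct(target: str, word_bank: tuple) -> int:
    @lru_cache(maxsize=None)
    def go(rest: str) -> int:
        if rest == "":
            return 1
        return sum(go(rest[len(word):]) for word in word_bank if rest.startswith(word))
    return go(target)

print(count_construct("jwajalapa", ("jwa", "j", "w", "a", "la", "lapa")))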
| 700 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__snake_case : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__snake_case : Optional[Any] = 12_8022
__snake_case : List[str] = 12_8028
@require_sentencepiece
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = MaMaaaTokenizer
__UpperCAmelCase : int = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Tuple = True
def __lowerCAmelCase ( self ) -> Any:
super().setUp()
_a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_a = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self , **snake_case_ ) -> str:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
return (
"This is a test",
"This is a test",
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "</s>"
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.get_tokenizer()
_a = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(snake_case_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __lowerCAmelCase ( self ) -> Any:
pass
def __lowerCAmelCase ( self ) -> Dict:
_a = self.get_tokenizer()
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_ ) , [2, 3, 4, 5, 6] , )
_a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
_a = tokenizer.convert_tokens_to_string(snake_case_ )
self.assertEqual(snake_case_ , "This is a test" )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
# fmt: off
_a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
__UpperCAmelCase : Any = """facebook/m2m100_418M"""
__UpperCAmelCase : Dict = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
__UpperCAmelCase : Optional[Any] = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
__UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def __lowerCAmelCase ( cls ) -> int:
_a = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
_a = 1
return cls
def __lowerCAmelCase ( self ) -> Any:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.tokenizer.get_vocab()
self.assertEqual(len(snake_case_ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = "en"
_a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.assertIn(snake_case_ , self.tokenizer.all_special_ids )
# fmt: off
_a = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
_a = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
_a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertNotIn(self.tokenizer.eos_token , snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = tempfile.mkdtemp()
_a = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(snake_case_ )
_a = MaMaaaTokenizer.from_pretrained(snake_case_ )
self.assertDictEqual(new_tok.lang_token_to_id , snake_case_ )
@require_torch
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "en"
_a = "fr"
_a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" )
_a = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_a = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_a = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_a = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowerCAmelCase ( self ) -> int:
_a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(snake_case_ ) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
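# For reference: a list-based sketch of the `shift_tokens_right` behaviour the seq2seq
# test above relies on (the library version operates on tensors). Decoder inputs are the
# labels shifted one position right with the decoder start token prepended, and masked
# label positions (-100) are replaced with padding.
def shift_tokens_right_sketch(labels, pad_token_id, decoder_start_token_id):
    shifted = [decoder_start_token_id] + labels[:-1]
    return [pad_token_id if t == -100 else t for t in shifted]

print(shift_tokens_right_sketch([128028, 55, 66, 2], pad_token_id=1, decoder_start_token_id=2))
# -> [2, 128028, 55, 66]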
| 691 | 0 |
'''simple docstring'''
from typing import Any
def _lowercase ( lowerCamelCase__ : Optional[int] ):
if not input_list:
return []
_a = [input_list.count(lowerCamelCase__ ) for value in input_list]
_a = max(lowerCamelCase__ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowerCamelCase__ ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
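# For comparison: an equivalent sketch of the mode computation using collections.Counter,
# returning every value that attains the maximum count.
from collections import Counter

def modes(values):
    if not values:
        return []
    counts = Counter(values)
    top = max(counts.values())
    return sorted(v for v, c in counts.items() if c == top)

print(modes([2, 3, 4, 5, 3, 4]))  # [3, 4]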
| 701 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : int = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A ( a ):
__UpperCAmelCase : Union[str, Any] = """wav2vec2"""
def __init__( self , snake_case_=3_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(1_0, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=1_2_8 , snake_case_=1_6 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=1_0 , snake_case_=2 , snake_case_=0.0 , snake_case_=1_0 , snake_case_=0 , snake_case_=3_2_0 , snake_case_=2 , snake_case_=0.1 , snake_case_=1_0_0 , snake_case_=2_5_6 , snake_case_=2_5_6 , snake_case_=0.1 , snake_case_="sum" , snake_case_=False , snake_case_=False , snake_case_=2_5_6 , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=5_1_2 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=False , snake_case_=3 , snake_case_=2 , snake_case_=3 , snake_case_=None , snake_case_=None , **snake_case_ , ) -> List[str]:
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# adapter
_a = add_adapter
_a = adapter_kernel_size
_a = adapter_stride
_a = num_adapter_layers
_a = output_hidden_size or hidden_size
_a = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = xvector_output_dim
@property
def __lowerCAmelCase ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
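# For reference: what the final property above computes — the feature extractor's total
# downsampling factor is the product of the per-layer convolution strides (the tuple
# below is the default from the __init__ signature).
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 -> one frame per 320 samples (20 ms at 16 kHz)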
| 691 | 0 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def _lowercase ( ):
_a = torch.nn.Linear(2, 4 )
_a = torch.optim.AdamW(model.parameters(), lr=1.0 )
_a = torch.optim.lr_scheduler.OneCycleLR(SCREAMING_SNAKE_CASE_, max_lr=0.01, steps_per_epoch=2, epochs=1 )
_a = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
_a = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def _lowercase ( lowerCamelCase__ : List[str] ):
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def _lowercase ( lowerCamelCase__ : Any ):
_a = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
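# For illustration: why the abs-sum "signature" helpers above work as a cheap checksum in
# these tests — loading a fresh random state dict changes the scalar, while a save/load
# round trip of the same state dict preserves it.
import torch

layer = torch.nn.Linear(2, 4)
sig = (layer.weight.abs().sum() + layer.bias.abs().sum()).item()
layer.load_state_dict(layer.state_dict())  # round trip leaves the signature unchanged
sig_after = (layer.weight.abs().sum() + layer.bias.abs().sum()).item()
print(abs(sig - sig_after) < 1e-6)  # True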
class A ( _UpperCAmelCase ):
@require_cuda
def __lowerCAmelCase ( self ) -> Dict:
_a = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(lowercase__ ):
_a = Accelerator(cpu=lowercase__ )
def __lowerCAmelCase ( self ) -> Any:
_a = Accelerator()
_a = GradientState()
assert state.num_steps == 1
_a = 4
assert state.num_steps == 4
assert state.sync_gradients is True
_a = False
assert state.sync_gradients is False
GradientState._reset_state()
def __lowerCAmelCase ( self ) -> Dict:
_a = Accelerator()
_a = create_components()
(
_a
) = accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def __lowerCAmelCase ( self ) -> int:
_a = Accelerator()
_a = create_components()
accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*snake_case_ , **snake_case_ ):
pass
with patch("torch.cuda.set_device" , lowercase__ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
_a = Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def __lowerCAmelCase ( self ) -> Any:
_a = Accelerator()
_a = create_components()
accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
_a = get_signature(lowercase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowercase__ )
# make sure random weights don't match
load_random_weights(lowercase__ )
self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(lowercase__ )
self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) < 1E-3 )
    def test_save_load_model_with_hooks( self ) -> int:
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        model_signature = get_signature(model )
        # saving hook
        def save_config(models , weights , output_dir ):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir , "data.json" ) , "w" ) as f:
                json.dump(config , f )
        # loading hook
        def load_config(models , input_dir ):
            with open(os.path.join(input_dir , "data.json" ) , "r" ) as f:
                config = json.load(f )
            models[0].class_name = config["class_name"]
        save_hook = accelerator.register_save_state_pre_hook(save_config )
        load_hook = accelerator.register_load_state_pre_hook(load_config )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match with hooks
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1E-3 )
            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1E-3 )
            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__ )
        # remove hooks
        save_hook.remove()
        load_hook.remove()
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match with hooks removed
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1E-3 )
            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1E-3 )
            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__ )
    def test_accelerator_none( self ) -> Optional[Any]:
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None
        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model , optimizer , scheduler , train_dl , valid_dl , dummy_obj )
        self.assertTrue(dummy_obj is None )
    def test_is_accelerate_prepared( self ) -> List[Any]:
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]
        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model , optimizer , scheduler , train_dl , valid_dl , dummy_obj )
        self.assertEqual(
            getattr(dummy_obj , "_is_accelerate_prepared" , False ) , False , "Dummy object should not have `_is_accelerate_prepared` set to `True`" , )
        self.assertEqual(
            getattr(model , "_is_accelerate_prepared" , False ) , True , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(optimizer , "_is_accelerate_prepared" , False ) , True , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(scheduler , "_is_accelerate_prepared" , False ) , True , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(train_dl , "_is_accelerate_prepared" , False ) , True , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
        self.assertEqual(
            getattr(valid_dl , "_is_accelerate_prepared" , False ) , True , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
    def test_accelerator_bnb( self ) -> Optional[Any]:
        from transformers import AutoModelForCausalLM
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_8bit=True , device_map={"": 0} , )
        accelerator = Accelerator()
        # This should work
        model = accelerator.prepare(model )
@slow
@require_bnb
    def test_accelerator_bnb_cpu_error( self ) -> Dict:
        from transformers import AutoModelForCausalLM
        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        model.tie_weights()
        device_map = infer_auto_device_map(model )
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , device_map=device_map , load_in_8bit=True , llm_int8_enable_fp32_cpu_offload=True )
        # This should not work and get value error
        with self.assertRaises(ValueError ):
            model = accelerator.prepare(model )
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_gpu( self ) -> int:
        from transformers import AutoModelForCausalLM
        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        model.tie_weights()
        device_map = infer_auto_device_map(model )
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_8bit=True , device_map=device_map , )
        accelerator = Accelerator()
        # This should not work and get value error
        with self.assertRaises(ValueError ):
            model = accelerator.prepare(model )
        PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed( self ) -> Optional[int]:
        from transformers import AutoModelForCausalLM
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m" , )
        device_map = infer_auto_device_map(model )
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m" , load_in_8bit=True , device_map=device_map , )
        accelerator = Accelerator()
        # This should work
        model = accelerator.prepare(model )
@require_cuda
    def test_accelerator_cpu_flag_prepare( self ) -> Dict:
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.01 )
        accelerator = Accelerator(cpu=True )
        _ = accelerator.prepare(model )
| 702 |
'''simple docstring'''
def set_bit ( number : int, position : int ):
    return number | (1 << position)
def clear_bit ( number : int, position : int ):
    return number & ~(1 << position)
def flip_bit ( number : int, position : int ):
    return number ^ (1 << position)
def is_bit_set ( number : int, position : int ):
    return ((number >> position) & 1) == 1
def get_bit ( number : int, position : int ):
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
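    # Quick illustration of the helpers above (the example values are mine,
    # not from the original snippet):
    assert set_bit(0b1101, 1) == 0b1111      # 13 -> 15
    assert clear_bit(0b1111, 2) == 0b1011    # 15 -> 11
    assert flip_bit(0b1101, 2) == 0b1001     # 13 -> 9
    assert is_bit_set(0b1010, 3) is True
    assert get_bit(0b1010, 1) == 1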
| 691 | 0 |
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
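# A minimal sketch of how the availability helpers re-exported above get used
# downstream (the printed messages are illustrative, not from the source):
if __name__ == "__main__":
    if is_torch_available():
        print("PyTorch detected, version:", get_torch_version())
    else:
        print("PyTorch not installed; torch-specific code paths are skipped.")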
| 703 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field ( input_text , convert_value=None , default=None , error_message=None ):
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )
def _ask_options ( input_text , options=[] , convert_value=None , default_choice=0 ):
    menu = BulletMenu(input_text , options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result
def _convert_compute_environment ( value ):
    value = int(value )
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _convert_distributed_mode ( value ):
    value = int(value )
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _convert_dynamo_backend ( value ):
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _convert_mixed_precision ( value ):
    value = int(value )
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _convert_sagemaker_distributed_mode ( value ):
    value = int(value )
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _convert_yes_no_to_bool ( value ):
    return {"yes": True, "no": False}[value.lower()]
class A ( argparse.RawDescriptionHelpFormatter ):
    def _format_usage( self , usage , actions , groups , prefix ) -> str:
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace("<command> [<args>] " , "" )
        return usage
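# Sketch of how the prompt helpers above compose (interactive; the prompt text
# and error message are mine, not from the source):
if __name__ == "__main__":
    use_cpu = _ask_field(
        "Do you want to run on CPU only? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    print("cpu:", use_cpu)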
| 691 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class A ( PretrainedConfig ):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"
    def __init__( self , vocab_size=21128 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
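# Hypothetical smoke test of the config above (only runnable inside the
# transformers package because of the relative import; the class keeps the
# snippet's name `A`):
if __name__ == "__main__":
    config = A()
    print(config.model_type, config.hidden_size, config.max_relative_position)  # nezha 768 64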
| 704 |
'''simple docstring'''
def simplify ( current_set: list[list] ):
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set ):
        magnitude = row[0]
        for column_index, column in enumerate(row ):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row )
            continue
        for column_index in range(len(row ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(temp_row )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        resultant = simplify(next_iteration )
        for i in range(len(resultant ) ):
            resultant[i].insert(0, current_first_column[i] )
        resultant.insert(0, final_set[0] )
        final_set = resultant
    return final_set
def solve_simultaneous ( equations: list[list] ):
    if len(equations ) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1" )
    _length = len(equations ) + 1
    if any(len(item ) != _length for item in equations ):
        raise IndexError("solve_simultaneous() requires n lists of length n+1" )
    for row in equations:
        if any(not isinstance(column, (int, float) ) for column in row ):
            raise ValueError("solve_simultaneous() requires lists of integers" )
    if len(equations ) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set ):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data ):
            if 0 not in row:
                full_row = data_set.pop(row_index )
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation" )
        data_set.insert(0, full_row )
    useable_form = data_set.copy()
    simplified = simplify(useable_form )
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0 )
                continue
            solutions.append(current_solution / row[-2] )
            continue
        temp_row = row.copy()[: len(row ) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0 )
        if len(temp_row ) == 0:
            solutions.append(0 )
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row ):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution )
    final = []
    for item in solutions:
        final.append(float(round(item, 5 ) ) )
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
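    # Expected output for the two calls above (worked out by hand; the 5x5
    # system is the matrix I + ones, so x_i = b_i - sum(b)/6):
    #   [-1.0, 0.0, 1.0, 2.0, 3.0]
    #   [0.5]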
| 691 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime ( number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number ) + 1 ), 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums ( n: int ) -> list[int]:
    str_num = str(n )
    list_nums = [n]
    for i in range(1, len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def validate ( n: int ) -> bool:
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True
def compute_truncated_primes ( count: int = 11 ) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def solution ():
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f'''{sum(compute_truncated_primes(11)) = }''')
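    # Spot check with a well-known truncatable prime (my example): 3797 stays
    # prime under truncation from either side -> 797, 97, 7 and 379, 37, 3.
    assert all(is_prime(i) for i in list_truncated_nums(3797))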
| 705 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum ( x ): # picklable for multiprocessing
    return x.sum()
def add_one ( i ): # picklable for multiprocessing
    return i + 1
@dataclass
class A :
    x: int
    y: str
class PyUtilsTest ( TestCase ):
    def test_map_nested( self ) -> Tuple:
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one , s1 ) , expected_map_nested_s1 )
        self.assertEqual(map_nested(add_one , s2 ) , expected_map_nested_s2 )
        self.assertEqual(map_nested(add_one , s3 ) , expected_map_nested_s3 )
        self.assertEqual(map_nested(add_one , s4 ) , expected_map_nested_s4 )
        self.assertEqual(map_nested(add_one , s5 ) , expected_map_nested_s5 )
        self.assertEqual(map_nested(add_one , s6 ) , expected_map_nested_s6 )
        self.assertEqual(map_nested(add_one , s7 ) , expected_map_nested_s7 )
        self.assertEqual(map_nested(add_one , s8 ) , expected_map_nested_s8 )
        num_proc = 2
        self.assertEqual(map_nested(add_one , s1 , num_proc=num_proc ) , expected_map_nested_s1 )
        self.assertEqual(map_nested(add_one , s2 , num_proc=num_proc ) , expected_map_nested_s2 )
        self.assertEqual(map_nested(add_one , s3 , num_proc=num_proc ) , expected_map_nested_s3 )
        self.assertEqual(map_nested(add_one , s4 , num_proc=num_proc ) , expected_map_nested_s4 )
        self.assertEqual(map_nested(add_one , s5 , num_proc=num_proc ) , expected_map_nested_s5 )
        self.assertEqual(map_nested(add_one , s6 , num_proc=num_proc ) , expected_map_nested_s6 )
        self.assertEqual(map_nested(add_one , s7 , num_proc=num_proc ) , expected_map_nested_s7 )
        self.assertEqual(map_nested(add_one , s8 , num_proc=num_proc ) , expected_map_nested_s8 )
        sn = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2 ).astype(int ),
            "b": np.zeros(3 ).astype(int ),
            "c": np.ones(2 ).astype(int ),
        }
        self.assertEqual(map_nested(np_sum , sn , map_numpy=False ) , expected_map_nested_sn_sum )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum , sn , map_numpy=True ).items()} , {k: v.tolist() for k, v in expected_map_nested_sn_int.items()} , )
        self.assertEqual(map_nested(np_sum , sn , map_numpy=False , num_proc=num_proc ) , expected_map_nested_sn_sum )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum , sn , map_numpy=True , num_proc=num_proc ).items()} , {k: v.tolist() for k, v in expected_map_nested_sn_int.items()} , )
        with self.assertRaises(AttributeError ): # can't pickle a local lambda
            map_nested(lambda x : x + 1 , sn , num_proc=num_proc )
    def test_zip_dict( self ) -> Any:
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(d1 , d2 , d3 ) ) , expected_zip_dict_result )
    def test_temporary_assignment( self ) -> str:
        class Foo:
            my_attr = "bar"
        foo = Foo()
        self.assertEqual(foo.my_attr , "bar" )
        with temporary_assignment(foo , "my_attr" , "BAR" ):
            self.assertEqual(foo.my_attr , "BAR" )
        self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def test_map_nested_num_proc ( iterable_length , num_proc , expected_num_proc ):
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {F'''{i}''': i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10, data_struct, num_proc=num_proc, parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest ( TestCase ):
    @require_tf
    def test_temp_seed_tf( self ) -> Any:
        import tensorflow as tf
        from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()
        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    @require_torch
    def test_temp_seed_torch( self ) -> Union[str, Any]:
        import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    def test_temp_seed_numpy( self ) -> Optional[int]:
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize("input_data", [{}] )
def test_nested_data_structure_data ( input_data ):
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def test_flatten ( data , expected_output ):
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def test_asdict ():
    input_ = A(x=1, y="foobar" )
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input_ ) == expected_output
    input_ = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input_ ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10, y="foo" )] )
def _split_text ( text: str ):
    return text.split()
def _2seconds_generator_of_2items_with_timing ( content ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def test_iflatmap_unordered ():
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(out ) == 4
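# Standalone sketch of what `map_nested` does on a toy structure (the toy
# values are mine, not from the test suite above):
if __name__ == "__main__":
    print(map_nested(add_one, {"a": [1, 2], "b": {"c": 3}}))  # {'a': [2, 3], 'b': {'c': 4}}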
| 691 | 0 |
'''simple docstring'''
def equated_monthly_installments ( principal: float , rate_per_annum: float , years_to_repay: int ) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0" )
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0" )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception("Years to repay must be an integer > 0" )
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
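    # Illustrative run (the numbers are hypothetical): borrow 100,000 at 10%
    # a year over 2 years -> a monthly installment of roughly 4614.49.
    print(equated_monthly_installments(100_000, 0.10, 2))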
    doctest.testmod()
| 706 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(__snake_case )
class A ( PretrainedConfig ):
    model_type = "rag"
    is_composition = True
    def __init__( self , vocab_size=None , is_encoder_decoder=True , prefix=None , bos_token_id=None , pad_token_id=None , eos_token_id=None , decoder_start_token_id=None , title_sep=" / " , doc_sep=" // " , n_docs=5 , max_combined_length=300 , retrieval_vector_size=768 , retrieval_batch_size=8 , dataset="wiki_dpr" , dataset_split="train" , index_name="compressed" , index_path=None , passages_path=None , use_dummy_dataset=False , reduce_loss=False , label_smoothing=0.0 , do_deduplication=True , exclude_bos_score=False , do_marginalize=False , output_retrieved=False , use_cache=True , forced_eos_token_id=None , **kwargs , ) -> Optional[Any]:
        super().__init__(
            bos_token_id=bos_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , is_encoder_decoder=is_encoder_decoder , prefix=prefix , vocab_size=vocab_size , **kwargs , )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder" )
        question_encoder_model_type = question_encoder_config.pop("model_type" )
        decoder_config = kwargs.pop("generator" )
        decoder_model_type = decoder_config.pop("model_type" )
        from ..auto.configuration_auto import AutoConfig
        self.question_encoder = AutoConfig.for_model(question_encoder_model_type , **question_encoder_config )
        self.generator = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator , "forced_eos_token_id" , None )
    @classmethod
    def from_question_encoder_generator_configs( cls , question_encoder_config , generator_config , **kwargs ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **kwargs )
    def to_dict( self ) -> dict:
        output = copy.deepcopy(self.__dict__ )
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
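# Sketch: building a composite config with the classmethod above (the model
# identifiers are examples of mine, and loading them needs network access):
if __name__ == "__main__":
    from transformers import AutoConfig
    question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    generator_config = AutoConfig.from_pretrained("facebook/bart-large")
    rag_config = A.from_question_encoder_generator_configs(question_encoder_config, generator_config)
    print(rag_config.model_type, rag_config.n_docs)  # rag 5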
| 691 | 0 |
'''simple docstring'''
def max_product ( numbers ):
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple) ) or not all(
        isinstance(number, int ) for number in numbers ):
        raise ValueError("numbers must be an iterable of integers" )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number )
        min_till_now = min(number, min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now )
    return max_prod
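# Quick illustration (the example arrays are mine, not from the source):
if __name__ == "__main__":
    assert max_product([2, 3, -2, 4]) == 6   # best subarray: [2, 3]
    assert max_product([-2, 0, -1]) == 0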
| 707 |
'''simple docstring'''
class Graph :
    def __init__( self ) -> None:
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ) -> None:
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ) -> None:
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ) -> None:
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ) -> str:
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F'''{head} -> {tail} == {weight}\n'''
        return string.rstrip("\n" )
    def get_edges( self ) -> list:
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self ):
        return self.adjacency.keys()
    @staticmethod
    def build( vertices=None , edges=None ):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
class UnionFind :
    def __init__( self ) -> None:
        self.parent = {}
        self.rank = {}
    def __len__( self ) -> int:
        return len(self.parent )
    def make_set( self , item ):
        if item in self.parent:
            return self.find(item )
        self.parent[item] = item
        self.rank[item] = 0
        return item
    def find( self , item ):
        if item not in self.parent:
            return self.make_set(item )
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item] )
        return self.parent[item]
    def union( self , item1 , item2 ):
        root1 = self.find(item1 )
        root2 = self.find(item2 )
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None
# Boruvka's MST algorithm, kept here as a module-level function since the two
# classes above were flattened out of their original nesting.
def boruvka_mst ( graph ):
    num_components = graph.num_vertices
    union_find = UnionFind()
    mst_edges = []
    while num_components > 1:
        cheap_edge = {}
        for vertex in graph.get_vertices():
            cheap_edge[vertex] = -1
        edges = graph.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight) )
        for edge in edges:
            head, tail, weight = edge
            set1 = union_find.find(head )
            set2 = union_find.find(tail )
            if set1 != set2:
                if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                    cheap_edge[set1] = [head, tail, weight]
                if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                    cheap_edge[set2] = [head, tail, weight]
        for vertex in cheap_edge:
            if cheap_edge[vertex] != -1:
                head, tail, weight = cheap_edge[vertex]
                if union_find.find(head ) != union_find.find(tail ):
                    union_find.union(head , tail )
                    mst_edges.append(cheap_edge[vertex] )
                    num_components = num_components - 1
    mst = Graph.build(edges=mst_edges )
    return mst
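# Minimal sketch exercising the classes above (the edge list is illustrative):
if __name__ == "__main__":
    g = Graph.build(edges=[("a", "b", 1), ("b", "c", 2), ("a", "c", 3)])
    g.distinct_weight()
    print(boruvka_mst(g))  # prints the two MST edges of the triangle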
| 691 | 0 |
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None
try:
    import msvcrt
except ImportError:
    msvcrt = None
try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]
__version__ = "3.0.12"
_logger = None
def logger ():
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout ( TimeoutError ):
    def __init__( self , lock_file ) -> None:
        self.lock_file = lock_file
        return None
    def __str__( self ) -> str:
        temp = F'''The file lock '{self.lock_file}' could not be acquired.'''
        return temp
class _Acquire_ReturnProxy :
    def __init__( self , lock ) -> None:
        self.lock = lock
        return None
    def __enter__( self ):
        return self.lock
    def __exit__( self , exc_type , exc_value , traceback ):
        self.lock.release()
        return None
class BaseFileLock :
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ) -> None:
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file( self ):
        return self._lock_file
    @property
    def timeout( self ):
        return self._timeout
    @timeout.setter
    def timeout( self , value ):
        self._timeout = float(value )
        return None
    def _acquire( self ):
        raise NotImplementedError()
    def _release( self ):
        raise NotImplementedError()
    @property
    def is_locked( self ):
        return self._lock_file_fd is not None
    def acquire( self , timeout=None , poll_intervall=0.05 ):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
                        self._acquire()
                if self.is_locked:
                    logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
                    time.sleep(poll_intervall )
        except: # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )
    def release( self , force=False ):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(F'''Lock {lock_id} released on {lock_filename}''' )
        return None
    def __enter__( self ):
        self.acquire()
        return self
    def __exit__( self , exc_type , exc_value , traceback ):
        self.release()
        return None
    def __del__( self ):
        self.release(force=True )
        return None
    def hash_filename_if_too_long( self , path , max_length ) -> str:
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname , new_filename )
        else:
            return path
class WindowsFileLock ( BaseFileLock ):
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ) -> None:
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
    def _acquire( self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None
    def _release( self ):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock ( BaseFileLock ):
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ) -> None:
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
    def _acquire( self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None
    def _release( self ):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class SoftFileLock ( BaseFileLock ):
    def _acquire( self ):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release( self ):
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
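# Minimal usage sketch of the platform-appropriate lock selected above (the
# file names are illustrative):
if __name__ == "__main__":
    with FileLock("demo.txt.lock", timeout=5):
        with open("demo.txt", "a") as f:
            f.write("exclusive write\n")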
| 708 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = "\\n Text data.\\n Second line of data."
FILE_PATH = "file"
@pytest.fixture(scope="session" )
def zstd_path (tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8" )
    with zstd.open(path, "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file (tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH ), "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] )
def test_cached_path_extract (compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False] )
@pytest.mark.parametrize("default_cache_dir", [True, False] )
def test_extracted_datasets_path (default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local (text_file ):
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local (tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec (tmpfs_file ):
    output_path = get_from_cache(F'''tmp://{tmpfs_file}''' )
    with open(output_path ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True )
def test_cached_path_offline ():
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", True )
def test_http_offline (tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        http_get("https://huggingface.co", temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", True )
def test_ftp_offline (tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get("ftp://huggingface.co", temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", True )
def test_fsspec_offline (tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get("s3://huggingface.co", temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head("s3://huggingface.co" )
| 691 | 0 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main ():
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/diffusers" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i : i.created_at, reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 709 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys ( state_dict ):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None )
    state_dict.pop("pixel_std", None )
    output_hypernetworks_mlps_pattern = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key )
        if re.match(output_hypernetworks_mlps_pattern, key ):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key ).group(2 ) )
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in" )
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0" )
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out" )
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint ( model_name , pytorch_dump_folder_path , push_to_hub , model_hub_id="ybelkada/segment-anything" ):
    checkpoint_path = hf_hub_download(model_hub_id, F'''checkpoints/{model_name}.pth''' )
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
        config = SamConfig(
            vision_config=vision_config, )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
        config = SamConfig(
            vision_config=vision_config, )
    state_dict = torch.load(checkpoint_path, map_location="cpu" )
    state_dict = replace_keys(state_dict )
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor )
    hf_model = SamModel(config )
    hf_model.load_state_dict(state_dict )
    hf_model = hf_model.to("cuda" )
    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True ).raw ).convert("RGB" )
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image ), return_tensors="pt" ).to("cuda" )
    with torch.no_grad():
        output = hf_model(**inputs )
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
        inputs = processor(
            images=np.array(raw_image ), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to("cuda" )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
        input_boxes = ((75, 275, 1_725, 850),)
        inputs = processor(images=np.array(raw_image ), input_boxes=input_boxes, return_tensors="pt" ).to("cuda" )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image ), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to("cuda" )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
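# Example invocation of this conversion script (the script file name and the
# output path are illustrative):
#   python convert_sam_to_hf.py --model_name sam_vit_b_01ec64 \
#       --pytorch_dump_folder_path ./sam-vit-base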
| 691 | 0 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate ( item: str , main_target: str ):
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover ( parent_1: str , parent_2: str ):
    random_slice = random.randint(0, len(parent_1 ) - 1 )
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate ( child: str , genes: list[str] ):
    child_list = list(child )
    if random.uniform(0, 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child ) ) - 1] = random.choice(genes )
    return "".join(child_list )
def select ( parent_1: tuple[str, float] , population_score: list[tuple[str, float]] , genes: list[str] , ):
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_2 = population_score[random.randint(0, N_SELECTED )][0]
        child_1 , child_2 = crossover(parent_1[0], parent_2 )
        # Append new string to the population list.
        pop.append(mutate(child_1, genes ) )
        pop.append(mutate(child_2, genes ) )
    return pop
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : List[Any] = True ):
if N_POPULATION < N_SELECTED:
_a = F'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(A_ )
# Verify that the target contains no genes besides the ones inside genes variable.
_a = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_a = F'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(A_ )
# Generate random starting population.
_a = []
for _ in range(A_ ):
population.append("".join([random.choice(A_ ) for i in range(len(A_ ) )] ) )
    # Just some logs to know what the algorithm is doing.
_a , _a = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(A_ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_a = [evaluate(A_, A_ ) for item in population]
# Check if there is a matching evolution.
_a = sorted(A_, key=lambda lowerCamelCase__ : x[1], reverse=A_ )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'''\nGeneration: {generation}'''
F'''\nTotal Population:{total_population}'''
F'''\nBest score: {population_score[0][1]}'''
F'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_a = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(A_ )
# Normalize population score to be between 0 and 1.
_a = [
(item, score / len(A_ )) for item, score in population_score
]
# This is selection
for i in range(A_ ):
population.extend(select(population_score[int(A_ )], A_, A_ ) )
        # Check if the population has already reached its maximum size and, if so,
        # break the cycle. If this check is disabled, the algorithm will take
        # forever to compute large strings, but it will also solve short strings in
        # far fewer generations.
if len(A_ ) > N_POPULATION:
break
if __name__ == "__main__":
__snake_case : List[str] = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
__snake_case : Optional[int] = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\"
)
__snake_case : Any = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 710 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Dict=0.9_99, lowerCamelCase__ : Union[str, Any]="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCamelCase__ : List[Any] ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCamelCase__ : Union[str, Any] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
_a = []
for i in range(lowerCamelCase__ ):
_a = i / num_diffusion_timesteps
_a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCamelCase__ ) / alpha_bar_fn(lowerCamelCase__ ), lowerCamelCase__ ) )
return torch.tensor(lowerCamelCase__, dtype=torch.floataa )
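# In closed form, the cosine branch above yields
#     beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta)
# with alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2 and
# max_beta = 0.999 by default, i.e. the "improved DDPM" cosine schedule.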
class A ( a , a ):
__UpperCAmelCase : int = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Optional[int] = 2
@register_to_config
def __init__( self , snake_case_ = 1_0_0_0 , snake_case_ = 0.00_085 , snake_case_ = 0.012 , snake_case_ = "linear" , snake_case_ = None , snake_case_ = "epsilon" , snake_case_ = "linspace" , snake_case_ = 0 , ) -> Optional[int]:
if trained_betas is not None:
_a = torch.tensor(snake_case_ , dtype=torch.floataa )
elif beta_schedule == "linear":
_a = torch.linspace(snake_case_ , snake_case_ , snake_case_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a = betas_for_alpha_bar(snake_case_ )
else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_a = 1.0 - self.betas
_a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(snake_case_ , snake_case_ , snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Dict:
if schedule_timesteps is None:
_a = self.timesteps
_a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a = 1 if len(snake_case_ ) > 1 else 0
else:
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
_a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowerCAmelCase ( self ) -> Dict:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
_a = self.index_for_timestep(snake_case_ )
if self.state_in_first_order:
_a = self.sigmas[step_index]
else:
_a = self.sigmas_interpol[step_index]
_a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , ) -> Union[str, Any]:
_a = num_inference_steps
_a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a = np.linspace(0 , num_train_timesteps - 1 , snake_case_ , dtype=snake_case_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(0 , snake_case_ ) * step_ratio).round()[::-1].copy().astype(snake_case_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(snake_case_ , 0 , -step_ratio )).round().copy().astype(snake_case_ )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
_a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a = torch.from_numpy(np.log(snake_case_ ) ).to(snake_case_ )
_a = np.interp(snake_case_ , np.arange(0 , len(snake_case_ ) ) , snake_case_ )
_a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a = torch.from_numpy(snake_case_ ).to(device=snake_case_ )
# interpolate sigmas
_a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
_a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(snake_case_ ).startswith("mps" ):
# mps does not support float64
_a = torch.from_numpy(snake_case_ ).to(snake_case_ , dtype=torch.floataa )
else:
_a = torch.from_numpy(snake_case_ ).to(snake_case_ )
# interpolate timesteps
_a = self.sigma_to_t(snake_case_ ).to(snake_case_ , dtype=timesteps.dtype )
_a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_a = torch.cat([timesteps[:1], interleaved_timesteps] )
_a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a = defaultdict(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
# get log sigma
_a = sigma.log()
# get distribution
_a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_a = low_idx + 1
_a = self.log_sigmas[low_idx]
_a = self.log_sigmas[high_idx]
# interpolate sigmas
_a = (low - log_sigma) / (low - high)
_a = w.clamp(0 , 1 )
# transform interpolation to time range
_a = (1 - w) * low_idx + w * high_idx
_a = t.view(sigma.shape )
return t
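    # sigma_to_t above inverts the noise schedule: it brackets log(sigma)
    # between two consecutive entries of self.log_sigmas, then linearly
    # interpolates a (possibly fractional) timestep between the two indices.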
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return self.sample is None
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ) -> Union[SchedulerOutput, Tuple]:
_a = self.index_for_timestep(snake_case_ )
# advance index counter by 1
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a = self.sigmas[step_index]
_a = self.sigmas_interpol[step_index + 1]
_a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_a = self.sigmas[step_index - 1]
_a = self.sigmas_interpol[step_index]
_a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a = 0
_a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a = sigma_interpol - sigma_hat
# store for 2nd order step
_a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_a = sigma_next - sigma_hat
_a = self.sample
_a = None
_a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(snake_case_ ):
# mps does not support float64
_a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_a = self.timesteps.to(original_samples.device )
_a = timesteps.to(original_samples.device )
_a = [self.index_for_timestep(snake_case_ , snake_case_ ) for t in timesteps]
_a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a = sigma.unsqueeze(-1 )
_a = original_samples + noise * sigma
return noisy_samples
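    # The noising method above follows the k-diffusion parameterization
    # x_t = x_0 + sigma_t * eps, broadcasting sigma over the trailing sample
    # dimensions before adding it to the clean samples.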
def __len__( self ) -> str:
return self.config.num_train_timesteps
| 691 | 0 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class A ( snake_case__ , unittest.TestCase ):
__UpperCAmelCase : Any = WavaVecaPhonemeCTCTokenizer
__UpperCAmelCase : Union[str, Any] = False
def __lowerCAmelCase ( self ) -> Union[str, Any]:
super().setUp()
_a = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
).split(" " )
_a = dict(zip(_A , range(len(_A ) ) ) )
_a = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_A ) + "\n" )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=False , snake_case_=2_0 , snake_case_=5 ) -> int:
_a = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=_A )) for i in range(len(_A ) )]
_a = list(filter(lambda snake_case_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=_A ) , _A ) )
if max_length is not None and len(_A ) > max_length:
_a = toks[:max_length]
if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
while len(_A ) < min_length:
_a = toks + toks
# toks_str = [t[1] for t in toks]
_a = [t[0] for t in toks]
# Ensure consistency
_a = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
if " " not in output_txt and len(_A ) > 1:
_a = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
)
if with_prefix_space:
_a = ' ' + output_txt
_a = tokenizer.encode(_A , add_special_tokens=_A )
return output_txt, output_ids
def __lowerCAmelCase ( self , **snake_case_ ) -> Any:
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **_A )
def __lowerCAmelCase ( self ) -> Dict:
_a = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
# check adding a single token
tokenizer.add_tokens("xxx" )
_a = tokenizer("m xxx ɪ" , do_phonemize=_A ).input_ids
self.assertEqual(_A , [1_3, 3_9_2, 1_7] ) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
_a = tokenizer("m aaa ɪ ccc" , do_phonemize=_A ).input_ids
self.assertEqual(_A , [1_3, 3_9_3, 1_7, 3_9_5] ) # aaa and ccc should be after xxx and 2 after aaa
_a = tokenizer("maɪ c" , do_phonemize=_A ).input_ids
self.assertEqual(_A , [3, 2_0_0] ) # mai should be <unk> (=3)
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
_a = 'Hello how are you'
_a = tokenizer.phonemize(_A , phonemizer_lang="en-us" )
self.assertEqual(_A , "h ə l oʊ h aʊ ɑːɹ j uː" )
def __lowerCAmelCase ( self ) -> int:
_a = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
_a = 'Hello how are you'
_a = tokenizer.phonemize(_A , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(_A ).input_ids , tokenizer(_A , do_phonemize=_A ).input_ids )
def __lowerCAmelCase ( self ) -> Any:
_a = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
_a = 'Hello how are you'
_a = tokenizer.phonemize(_A , phonemizer_lang="en-us" )
_a = tokenizer.decode(tokenizer(_A ).input_ids )
self.assertEqual(_A , _A )
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
_a = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7],
]
_a = tokenizer.decode(sample_ids[0] )
_a = tokenizer.batch_decode(_A )
self.assertEqual(_A , batch_tokens[0] )
self.assertEqual(_A , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
def __lowerCAmelCase ( self ) -> str:
_a = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
_a = 'Hello how are you'
_a = tokenizer.phonemize(_A , phonemizer_lang="en-us" )
self.assertEqual(_A , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
_a = 'Hello how are you'
_a = tokenizer.phonemize(_A , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(_A ).input_ids , tokenizer(_A , do_phonemize=_A ).input_ids )
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
_a = [
[1_1, 5, 1_5, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 1_5, 8, tokenizer.word_delimiter_token_id, 9_8],
[tokenizer.word_delimiter_token_id, 2_4, 2_2, tokenizer.word_delimiter_token_id, 5, 2_4, 2_2, 5, 7_7],
]
# fmt: on
# decode with word_del_token filter
_a = tokenizer.decode(sample_ids[0] )
_a = tokenizer.batch_decode(_A )
self.assertEqual(_A , batch_tokens[0] )
self.assertEqual(_A , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
# decode with no word_del_token filter
_a = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_A )
_a = tokenizer.batch_decode(_A , filter_word_delimiter_token=_A )
self.assertEqual(_A , batch_tokens[0] )
self.assertEqual(_A , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
def __lowerCAmelCase ( self ) -> str:
_a = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
_a = 'Hello how are you'
_a = tokenizer.phonemize(_A , phonemizer_lang="en-us" )
_a = tokenizer.decode(tokenizer(_A ).input_ids , filter_word_delimiter_token=_A )
self.assertEqual(_A , _A )
def __lowerCAmelCase ( self ) -> int:
_a = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
_a = 'Hello how are you'
_a = tokenizer.phonemize(_A , phonemizer_lang="en-us" )
_a = tokenizer.decode(tokenizer(_A ).input_ids , filter_word_delimiter_token=_A )
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , _A )
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=_A )
_a = 'Hello how are you'
_a = tokenizer(_A , phonemizer_lang="en-us" ).input_ids
_a = tokenizer(_A , phonemizer_lang="fr-fr" ).input_ids
self.assertNotEqual(_A , _A )
_a = tokenizer.decode(_A )
_a = tokenizer.decode(_A )
self.assertEqual(_A , "h ə l oʊ h aʊ ɑːɹ j uː" )
self.assertEqual(_A , "ɛ l o h aʊ a ʁ j u" )
def __lowerCAmelCase ( self ) -> Any:
_a = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
_a = 'Hello how Are you'
_a = 'hello how are you'
_a = tokenizer(_A ).input_ids
_a = tokenizer(_A ).input_ids
self.assertEqual(_A , _A )
def __lowerCAmelCase ( self ) -> Dict:
_a = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
tokenizer.add_tokens(["!", "?"] )
tokenizer.add_special_tokens({"cls_token": "$$$"} )
# fmt: off
_a = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8, 3_9_2, 3_9_2, 3_9_3, 3_9_2, 3_9_2, 3_9_3, 3_9_4, 3_9_4],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7, tokenizer.pad_token_id, 3_9_4, 3_9_4],
]
# fmt: on
_a = tokenizer.batch_decode(_A )
self.assertEqual(_A , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
@staticmethod
def __lowerCAmelCase ( snake_case_ , snake_case_ ) -> List[str]:
_a = [d[key] for d in offsets]
return retrieved_list
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_tokenizer(word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
_a = [1_1, 5, 5, 5, 1_5, 1_5, tokenizer.pad_token_id, 1_5, 1_5, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 1_5, 8, 8, 8, tokenizer.word_delimiter_token_id, 9_8]
# fmt: on
_a = tokenizer.decode(_A , output_char_offsets=_A , filter_word_delimiter_token=_A )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("text" in outputs )
self.assertTrue("char_offsets" in outputs )
self.assertTrue(isinstance(_A , _A ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 1_1, 1_2, 1_5, 1_6] )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 1_0, 1_2, 1_5, 1_6, 1_7] )
def __lowerCAmelCase ( self ) -> int:
_a = self.get_tokenizer(word_delimiter_token="|" )
def check_list_tuples_equal(snake_case_ , snake_case_ ):
self.assertTrue(isinstance(_A , _A ) )
self.assertTrue(isinstance(outputs_list[0] , _A ) )
# transform list to ModelOutput
_a = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"] )
def recursive_check(snake_case_ , snake_case_ ):
if isinstance(_A , _A ):
[recursive_check(_A , _A ) for la, la in zip(_A , _A )]
self.assertEqual(_A , _A )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"] )
# fmt: off
_a = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 4, 8, 9_8, 3_2, 3_2, 3_2, 3_2, 4, 3_3, tokenizer.word_delimiter_token_id, 3_2, 3_2, 3_3, 3_4, 3_4],
[2_4, 2_2, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 2_4, 2_2, 2_2, 2_2, 4, 5, 7_7, tokenizer.pad_token_id, 2_2, 2_2, 4, 3_4, 3_4, 3_4, 3_4],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
_a = tokenizer.batch_decode(_A , output_char_offsets=_A )
_a = [tokenizer.decode(_A , output_char_offsets=_A ) for ids in sample_ids]
check_list_tuples_equal(_A , _A )
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
def __lowerCAmelCase ( self ) -> List[str]:
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
def __lowerCAmelCase ( self ) -> Tuple:
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
def __lowerCAmelCase ( self ) -> Tuple:
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
def __lowerCAmelCase ( self ) -> str:
pass
def __lowerCAmelCase ( self ) -> str:
_a = self.get_tokenizers(do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_a = tokenizer.vocab_size
_a = len(_A )
self.assertNotEqual(_A , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_a = ['aaaaa bbbbbb', 'cccccccccdddddddd']
_a = tokenizer.add_tokens(_A )
_a = tokenizer.vocab_size
_a = len(_A )
self.assertNotEqual(_A , 0 )
self.assertEqual(_A , _A )
self.assertEqual(_A , len(_A ) )
self.assertEqual(_A , all_size + len(_A ) )
_a = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=_A )
self.assertGreaterEqual(len(_A ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_a = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
_a = tokenizer.add_special_tokens(_A )
_a = tokenizer.vocab_size
_a = len(_A )
self.assertNotEqual(_A , 0 )
self.assertEqual(_A , _A )
self.assertEqual(_A , len(_A ) )
self.assertEqual(_A , all_size_a + len(_A ) )
_a = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=_A )
self.assertGreaterEqual(len(_A ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode." )
def __lowerCAmelCase ( self ) -> Any:
pass
@unittest.skip("The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode." )
def __lowerCAmelCase ( self ) -> Optional[int]:
pass
def __lowerCAmelCase ( self ) -> Optional[int]:
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
_a = self.get_tokenizers(fast=_A , do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_a = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
_a = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(output["text"] , _A )
| 711 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : list[int], lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int, lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
# Base Case
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index], lowerCamelCase__, lowerCamelCase__ ):
# Color current vertex
_a = i
# Validate coloring
if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, index + 1 ):
return True
# Backtrack
_a = -1
return False
def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int ):
_a = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, 0 ):
return colored_vertices
return []
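# Minimal usage sketch (hypothetical adjacency matrix, not from the module):
# a triangle graph is 3-colorable but not 2-colorable, so with
#     graph = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
# the entry point above returns [] for max_colors == 2 and a valid assignment
# such as [0, 1, 2] for max_colors == 3.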
| 691 | 0 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : str=0.9_99, lowerCamelCase__ : int="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCamelCase__ : Any ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCamelCase__ : Dict ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
_a = []
for i in range(__snake_case ):
_a = i / num_diffusion_timesteps
_a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ), __snake_case ) )
return torch.tensor(__snake_case, dtype=torch.floataa )
class A ( a , a ):
__UpperCAmelCase : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Optional[int] = 2
@register_to_config
def __init__( self , snake_case_ = 1_0_0_0 , snake_case_ = 0.00_085 , snake_case_ = 0.012 , snake_case_ = "linear" , snake_case_ = None , snake_case_ = "epsilon" , snake_case_ = False , snake_case_ = False , snake_case_ = 1.0 , snake_case_ = "linspace" , snake_case_ = 0 , ) -> Optional[int]:
if trained_betas is not None:
_a = torch.tensor(__a , dtype=torch.floataa )
elif beta_schedule == "linear":
_a = torch.linspace(__a , __a , __a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a = betas_for_alpha_bar(__a , alpha_transform_type="cosine" )
elif beta_schedule == "exp":
_a = betas_for_alpha_bar(__a , alpha_transform_type="exp" )
else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_a = 1.0 - self.betas
_a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__a , __a , __a )
_a = use_karras_sigmas
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> List[Any]:
if schedule_timesteps is None:
_a = self.timesteps
_a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a = 1 if len(__a ) > 1 else 0
else:
_a = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
_a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowerCAmelCase ( self ) -> Dict:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
_a = self.index_for_timestep(__a )
_a = self.sigmas[step_index]
_a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , ) -> List[Any]:
_a = num_inference_steps
_a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
_a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a = np.log(__a )
_a = np.interp(__a , np.arange(0 , len(__a ) ) , __a )
if self.config.use_karras_sigmas:
_a = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps )
_a = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] )
_a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a = torch.from_numpy(__a ).to(device=__a )
_a = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_a = torch.from_numpy(__a )
_a = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__a ).startswith("mps" ):
# mps does not support float64
_a = timesteps.to(__a , dtype=torch.floataa )
else:
_a = timesteps.to(device=__a )
# empty dt and derivative
_a = None
_a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a = defaultdict(__a )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> List[Any]:
_a = np.log(__a )
# get distribution
_a = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_a = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_a = low_idx + 1
_a = log_sigmas[low_idx]
_a = log_sigmas[high_idx]
# interpolate sigmas
_a = (low - log_sigma) / (low - high)
_a = np.clip(__a , 0 , 1 )
# transform interpolation to time range
_a = (1 - w) * low_idx + w * high_idx
_a = t.reshape(sigma.shape )
return t
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> torch.FloatTensor:
_a = in_sigmas[-1].item()
_a = in_sigmas[0].item()
_a = 7.0 # 7.0 is the value used in the paper
_a = np.linspace(0 , 1 , __a )
_a = sigma_min ** (1 / rho)
_a = sigma_max ** (1 / rho)
_a = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
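    # _convert_to_karras implements the rho-schedule of Karras et al. (2022):
    #     sigma_i = (sigma_max**(1/rho) + ramp_i * (sigma_min**(1/rho)
    #                - sigma_max**(1/rho))) ** rho, with rho = 7.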
@property
def __lowerCAmelCase ( self ) -> int:
return self.dt is None
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ) -> Union[SchedulerOutput, Tuple]:
_a = self.index_for_timestep(__a )
# advance index counter by 1
_a = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a = self.sigmas[step_index]
_a = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_a = self.sigmas[step_index - 1]
_a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a = 0
_a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a = sigma_hat if self.state_in_first_order else sigma_next
_a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a = sigma_hat if self.state_in_first_order else sigma_next
_a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_a = model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
_a = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a = sigma_next - sigma_hat
# store for 2nd order step
_a = derivative
_a = dt
_a = sample
else:
# 2. 2nd order / Heun's method
_a = (sample - pred_original_sample) / sigma_next
_a = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_a = self.dt
_a = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_a = None
_a = None
_a = None
_a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
_a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__a ):
# mps does not support float64
_a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_a = self.timesteps.to(original_samples.device )
_a = timesteps.to(original_samples.device )
_a = [self.index_for_timestep(__a , __a ) for t in timesteps]
_a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a = sigma.unsqueeze(-1 )
_a = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> int:
return self.config.num_train_timesteps
| 712 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class A :
def __init__( self , snake_case_ ) -> Optional[int]:
_a = str(id_ )
_a = None
_a = None
_a = []
_a = {} # {vertex:distance}
def __lt__( self , snake_case_ ) -> Optional[Any]:
return self.key < other.key
def __repr__( self ) -> Union[str, Any]:
return self.id
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
self.neighbors.append(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Any:
_a = weight
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : str ):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1], lowerCamelCase__ )
graph[b - 1].add_edge(graph[a - 1], lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
_a = []
for u in graph:
_a = math.inf
_a = None
_a = 0
_a = graph[:]
while q:
_a = min(lowerCamelCase__ )
q.remove(lowerCamelCase__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_a = u
_a = u.edges[v.id]
for i in range(1, len(lowerCamelCase__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
for u in graph:
_a = math.inf
_a = None
_a = 0
_a = list(lowerCamelCase__ )
hq.heapify(lowerCamelCase__ )
while h:
_a = hq.heappop(lowerCamelCase__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_a = u
_a = u.edges[v.id]
hq.heapify(lowerCamelCase__ )
for i in range(1, len(lowerCamelCase__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
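# The first variant above scans the remaining queue for the minimum key on
# every iteration (O(V^2) overall); the second keeps vertices in a binary
# heap and re-heapifies after each key decrease, yielding MST edges lazily.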
def _lowercase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 691 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A ( lowercase_ ):
__UpperCAmelCase : Any = ["""image_processor""", """tokenizer"""]
__UpperCAmelCase : List[str] = """Pix2StructImageProcessor"""
__UpperCAmelCase : str = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self , snake_case_ , snake_case_ ) -> List[str]:
_a = False
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
def __call__( self , snake_case_=None , snake_case_ = None , snake_case_ = True , snake_case_ = False , snake_case_ = None , snake_case_ = None , snake_case_ = 2_0_4_8 , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = True , snake_case_ = None , **snake_case_ , ) -> List[Any]:
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
_a = self.tokenizer
_a = self.tokenizer(
text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , stride=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_length=lowerCamelCase_ , verbose=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
_a = self.image_processor(
lowerCamelCase_ , return_tensors=lowerCamelCase_ , max_patches=lowerCamelCase_ , **lowerCamelCase_ )
else:
# add pixel_values and bbox
_a = self.image_processor(
lowerCamelCase_ , return_tensors=lowerCamelCase_ , max_patches=lowerCamelCase_ , header_text=lowerCamelCase_ , **lowerCamelCase_ )
if text is not None and not self.image_processor.is_vqa:
_a = self.tokenizer(
text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , stride=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_length=lowerCamelCase_ , verbose=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ , )
if "attention_mask" in text_encoding:
_a = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
_a = text_encoding.pop("input_ids" )
else:
_a = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase_ )
return encoding_image_processor
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> List[Any]:
return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> Any:
return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ )
@property
def __lowerCAmelCase ( self ) -> int:
_a = self.tokenizer.model_input_names
_a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
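# Hedged usage sketch (class and checkpoint names are assumptions based on the
# original Pix2Struct API, illustrative only):
#     processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#     inputs = processor(images=image, text="A caption", return_tensors="pt")
# For non-VQA checkpoints the tokenized text is merged into the image-processor
# output rather than returned as a separate encoding.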
| 713 |
'''simple docstring'''
__snake_case : List[str] = "Tobias Carryer"
from time import time
class A :
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=int(time() ) ) -> str: # noqa: B008
_a = multiplier
_a = increment
_a = modulo
_a = seed
def __lowerCAmelCase ( self ) -> str:
_a = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
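# The class above implements the classic recurrence
#     X_{n+1} = (a * X_n + c) mod m
# and the demo below uses the well-known Numerical Recipes constants
# a = 1664525, c = 1013904223, m = 2**32 (written here as 2 << 31).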
if __name__ == "__main__":
# Show the LCG in action.
__snake_case : Union[str, Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
| 691 | 0 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
__snake_case : int = 637_8137.0
__snake_case : List[Any] = 635_6752.31_4245
__snake_case : Dict = 637_8137
def _lowercase ( lowerCamelCase__ : float, lowerCamelCase__ : float, lowerCamelCase__ : float, lowerCamelCase__ : float ):
_a = (AXIS_A - AXIS_B) / AXIS_A
_a = atan((1 - flattening) * tan(radians(A__ ) ) )
_a = atan((1 - flattening) * tan(radians(A__ ) ) )
_a = radians(A__ )
_a = radians(A__ )
# Equation
_a = sin((phi_a - phi_a) / 2 )
_a = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
_a = sqrt(sin_sq_phi + (cos(A__ ) * cos(A__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(A__ )
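# The function above is the first step of Lambert's method: convert geodetic
# latitudes to reduced latitudes on the WGS-84 ellipsoid, then apply the
# haversine formula with the equatorial radius. Hedged sanity check
# (approximate city coordinates, illustrative only): San Francisco
# (37.77, -122.42) to New York (40.71, -74.01) comes out near 4.1e6 metres.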
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case : List[str] = logging.get_logger("transformers.models.encodec")
__snake_case : Tuple = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
__snake_case : int = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
__snake_case : Optional[int] = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
__snake_case : Tuple = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
__snake_case : int = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
__snake_case : Union[str, Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case : List[str] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__snake_case : Tuple = []
__snake_case : Optional[int] = []
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : List[Any] ):
for attribute in key.split("." ):
_a = getattr(lowerCamelCase__, lowerCamelCase__ )
if weight_type is not None:
_a = getattr(lowerCamelCase__, lowerCamelCase__ ).shape
else:
_a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
            F'''Shape of hf {key + ('.' + weight_type if weight_type is not None else '')} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "running_mean":
_a = value
elif weight_type == "running_var":
_a = value
elif weight_type == "num_batches_tracked":
_a = value
elif weight_type == "weight_ih_l0":
_a = value
elif weight_type == "weight_hh_l0":
_a = value
elif weight_type == "bias_ih_l0":
_a = value
elif weight_type == "bias_hh_l0":
_a = value
elif weight_type == "weight_ih_l1":
_a = value
elif weight_type == "weight_hh_l1":
_a = value
elif weight_type == "bias_ih_l1":
_a = value
elif weight_type == "bias_hh_l1":
_a = value
else:
_a = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : str ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
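# Hedged examples (hypothetical keys): with ignore_keys == ["encoder.*"] the
# check matches any name starting with "encoder."; with ["quantizer.*.inited"]
# it matches names containing both "quantizer" and "inited"; plain keys are
# matched by substring.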
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : int ):
_a = []
if model_name == "encodec_24khz" or "encodec_32khz":
_a = MAPPING_24K
elif model_name == "encodec_48khz":
_a = MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__, lowerCamelCase__ ):
logger.info(F'''{name} was ignored''' )
continue
_a = False
for key, mapped_key in MAPPING.items():
if "*" in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
_a = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("embed" ) and name.endswith("embed_avg" ):
continue
_a = True
if "*" in mapped_key:
_a = name.split(lowerCamelCase__ )[0].split("." )[-2]
_a = mapped_key.replace("*", lowerCamelCase__ )
if "weight_g" in name:
_a = "weight_g"
elif "weight_v" in name:
_a = "weight_v"
elif "weight_ih_l0" in name:
_a = "weight_ih_l0"
elif "weight_hh_l0" in name:
_a = "weight_hh_l0"
elif "bias_ih_l0" in name:
_a = "bias_ih_l0"
elif "bias_hh_l0" in name:
_a = "bias_hh_l0"
elif "weight_ih_l1" in name:
_a = "weight_ih_l1"
elif "weight_hh_l1" in name:
_a = "weight_hh_l1"
elif "bias_ih_l1" in name:
_a = "bias_ih_l1"
elif "bias_hh_l1" in name:
_a = "bias_hh_l1"
elif "bias" in name:
_a = "bias"
elif "weight" in name:
_a = "weight"
elif "running_mean" in name:
_a = "running_mean"
elif "running_var" in name:
_a = "running_var"
elif "num_batches_tracked" in name:
_a = "num_batches_tracked"
else:
_a = None
set_recursively(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : str=None, lowerCamelCase__ : List[Any]=None, ):
if config_path is not None:
_a = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
_a = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
_a = [8, 5, 4, 4]
_a = [2.2]
_a = 64
_a = 32_000
_a = 2_048
_a = False
_a = False
_a = False
elif model_name == "encodec_48khz":
_a = [8, 5, 4, 2]
_a = [3.0, 6.0, 12.0, 24.0]
_a = 48_000
_a = 2
_a = False
_a = "time_group_norm"
_a = True
_a = 1.0
_a = 0.01
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
_a = EncodecModel(lowerCamelCase__ )
_a = EncodecFeatureExtractor(
feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
feature_extractor.save_pretrained(lowerCamelCase__ )
_a = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
_a = original_checkpoint["best_state"]
recursively_load_weights(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("Pushing to the hub..." )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__snake_case : List[Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 691 | 0 |
'''simple docstring'''
from __future__ import annotations
__snake_case : int = "Muhammad Umer Farooq"
__snake_case : Optional[Any] = "MIT"
__snake_case : Dict = "1.0.0"
__snake_case : Optional[Any] = "Muhammad Umer Farooq"
__snake_case : Tuple = "[email protected]"
__snake_case : Tuple = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class A ( _UpperCamelCase ):
def __init__( self , snake_case_ ) -> List[Any]:
super().__init__()
_a = []
_a = domain
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> List[Any]:
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
                # If href is defined and is neither empty nor just "#", record it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
_a = parse.urljoin(self.domain , snake_case_ )
self.urls.append(snake_case_ )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
return ".".join(get_sub_domain_name(lowerCamelCase__ ).split("." )[-2:] )
def _lowercase ( lowerCamelCase__ : List[Any] ):
return parse.urlparse(lowerCamelCase__ ).netloc
def _lowercase ( lowerCamelCase__ : Optional[Any] = "https://github.com" ):
_a = get_domain_name(lowerCamelCase__ )
# Initialize the parser
_a = Parser(lowerCamelCase__ )
try:
# Open URL
_a = requests.get(lowerCamelCase__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
_a = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
_a = requests.get(lowerCamelCase__ )
# Get the valid email.
_a = re.findall("[a-zA-Z0-9]+@" + domain, read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(lowerCamelCase__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case : Dict = emails_from_url("https://github.com")
print(f'''{len(emails)} emails found:''')
print("\n".join(sorted(emails)))
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : int = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
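# With this lazy module in place, `import transformers.models.bloom` stays cheap:
# heavy submodules such as `modeling_bloom` are only imported the first time one of
# their attributes (e.g. `BloomModel`) is actually accessed.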
| 691 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__snake_case : str = None
__snake_case : str = logging.get_logger(__name__)
__snake_case : Optional[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
__snake_case : Dict = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
__snake_case : Optional[Any] = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class A ( a ):
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[int] = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[str] = TaTokenizer
__UpperCAmelCase : List[str] = []
def __init__( self , snake_case_=None , snake_case_=None , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_=1_0_0 , snake_case_=None , **snake_case_ , ) -> List[Any]:
if extra_ids > 0 and additional_special_tokens is None:
_a = [F'''<extra_id_{i}>''' for i in range(lowercase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
_a = len(set(filter(lambda snake_case_ : bool("extra_id_" in str(lowercase_ ) ) , lowercase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
_a = vocab_file
_a = False if not self.vocab_file else True
_a = extra_ids
@staticmethod
def __lowerCAmelCase ( snake_case_ , snake_case_ , snake_case_ ) -> Dict:
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
_a = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
return max_model_length
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_a = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
logger.info(F'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
_a = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __lowerCAmelCase ( self ) -> int:
return list(
set(filter(lambda snake_case_ : bool(re.search(R"<extra_id_\d+>" , lowercase_ ) ) is not None , self.additional_special_tokens ) ) )
def __lowerCAmelCase ( self ) -> Optional[int]:
return [self.convert_tokens_to_ids(lowercase_ ) for token in self.get_sentinel_tokens()]
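# Minimal usage sketch (hedged: upstream publishes this class as T5TokenizerFast,
# while in this snippet it is named `A`, and downloading the checkpoint is assumed
# to succeed):
#   tok = T5TokenizerFast.from_pretrained("t5-small")
#   tok("Hello world").input_ids[-1] == tok.eos_token_id  # </s> is appended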
| 716 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=a ):
__UpperCAmelCase : int = ["""torch""", """scipy"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(self , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ["torch", "scipy"] )
| 691 | 0 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def _lowercase ( lowerCamelCase__ : int ):
return EnvironmentCommand()
def _lowercase ( lowerCamelCase__ : Tuple ):
return EnvironmentCommand(args.accelerate_config_file )
class A ( __lowerCAmelCase ):
@staticmethod
def __lowerCAmelCase ( snake_case_ ) -> Tuple:
_a = parser.add_parser("env" )
download_parser.set_defaults(func=lowerCAmelCase_ )
download_parser.add_argument(
"--accelerate-config_file" , default=lowerCAmelCase_ , help="The accelerate config file to use for the default values in the launching script." , )
download_parser.set_defaults(func=lowerCAmelCase_ )
def __init__( self , snake_case_ , *snake_case_ ) -> None:
_a = accelerate_config_file
def __lowerCAmelCase ( self ) -> Dict:
_a = "not installed"
if is_safetensors_available():
import safetensors
_a = safetensors.__version__
elif importlib.util.find_spec("safetensors" ) is not None:
import safetensors
_a = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
_a = "not installed"
_a = _a = "not found"
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_a = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(lowerCAmelCase_ ):
_a = load_config_from_file(self._accelerate_config_file ).to_dict()
_a = (
"\n".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
else F'''\t{accelerate_config}'''
)
_a = "not installed"
_a = "NA"
if is_torch_available():
import torch
_a = torch.__version__
_a = torch.cuda.is_available()
_a = "not installed"
_a = "NA"
if is_tf_available():
import tensorflow as tf
_a = tf.__version__
try:
# deprecated in v2.1
_a = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_a = bool(tf.config.list_physical_devices("GPU" ) )
_a = "not installed"
_a = "not installed"
_a = "not installed"
_a = "NA"
if is_flax_available():
import flax
import jax
import jaxlib
_a = flax.__version__
_a = jax.__version__
_a = jaxlib.__version__
_a = jax.lib.xla_bridge.get_backend().platform
_a = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"Safetensors version": F'''{safetensors_version}''',
"Accelerate version": F'''{accelerate_version}''',
"Accelerate config": F'''{accelerate_config_str}''',
"PyTorch version (GPU?)": F'''{pt_version} ({pt_cuda_available})''',
"Tensorflow version (GPU?)": F'''{tf_version} ({tf_cuda_available})''',
"Flax version (CPU?/GPU?/TPU?)": F'''{flax_version} ({jax_backend})''',
"Jax version": F'''{jax_version}''',
"JaxLib version": F'''{jaxlib_version}''',
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(lowerCAmelCase_ ) )
return info
@staticmethod
def __lowerCAmelCase ( snake_case_ ) -> Tuple:
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 717 |
'''simple docstring'''
__snake_case : Dict = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
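# A sketch of how setup.py typically consumes such a pin table; the helper name is
# illustrative and not part of the upstream file:
def _deps_list(*pkgs):
    # Look up the pinned requirement string for each requested package name.
    return [__snake_case[p] for p in pkgs]
# e.g. _deps_list("filelock", "numpy") == ["filelock", "numpy>=1.17"]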
| 691 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str] ):
if gpta_config_file == "":
_a = GPTaConfig()
else:
_a = GPTaConfig.from_json_file(lowerCamelCase__ )
_a = GPTaModel(lowerCamelCase__ )
# Load weights from numpy
load_tf_weights_in_gpta(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
# Save pytorch-model
_a = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
_a = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict(), lowerCamelCase__ )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__snake_case : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
__snake_case : Optional[int] = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
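    # A hypothetical invocation (the TF checkpoint path is a placeholder):
    #   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
    #       --gpt2_checkpoint_path ./models/117M/model.ckpt \
    #       --pytorch_dump_folder_path ./gpt2-pt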
| 718 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = ProphetNetTokenizer
__UpperCAmelCase : Optional[Any] = False
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
_a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __lowerCAmelCase ( self , snake_case_ ) -> Any:
_a = "UNwant\u00E9d,running"
_a = "unwanted, running"
return input_text, output_text
def __lowerCAmelCase ( self ) -> Any:
_a = self.tokenizer_class(self.vocab_file )
_a = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> int:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_a = {}
for i, token in enumerate(snake_case_ ):
_a = i
_a = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_a = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
_a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
_a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ )
_a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 691 | 0 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : Any ):
    _a = len(lowerCamelCase__ )
    for i in range(_a ):
        for j in range(i + 1, _a ):
            if lowerCamelCase__[j] < lowerCamelCase__[i]:
                lowerCamelCase__[i], lowerCamelCase__[j] = lowerCamelCase__[j], lowerCamelCase__[i]
    return lowerCamelCase__
if __name__ == "__main__":
__snake_case : Dict = input("Enter numbers separated by a comma:\n").strip()
__snake_case : List[str] = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
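    # Exchange sort makes O(n^2) comparisons; e.g. an input of [3, 1, 2] is sorted
    # in place (and returned) as [1, 2, 3].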
| 719 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _lowercase ( ):
_a = argparse.ArgumentParser()
parser.add_argument("--model_ckpt", type=lowerCamelCase__, default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs", type=lowerCamelCase__, default=5 )
parser.add_argument("--batch_size", type=lowerCamelCase__, default=6 )
parser.add_argument("--gradient_accumulation_steps", type=lowerCamelCase__, default=1 )
parser.add_argument("--freeze", type=lowerCamelCase__, default=lowerCamelCase__ )
parser.add_argument("--learning_rate", type=lowerCamelCase__, default=5e-4 )
parser.add_argument("--seed", type=lowerCamelCase__, default=0 )
parser.add_argument("--lr_scheduler_type", type=lowerCamelCase__, default="cosine" )
parser.add_argument("--num_warmup_steps", type=lowerCamelCase__, default=10 )
parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 )
parser.add_argument("--output_dir", type=lowerCamelCase__, default="./results" )
return parser.parse_args()
__snake_case : str = load("accuracy")
def _lowercase ( lowerCamelCase__ : List[str] ):
_a , _a = eval_pred
_a = np.argmax(lowerCamelCase__, axis=1 )
return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ )
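# eval_pred arrives as a (logits, labels) pair; taking the argmax over axis 1
# picks the most likely of the complexity classes (7 in this script) per example.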
class A ( a ):
def __init__( self , snake_case_ ) -> None:
super().__init__()
_a = trainer
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]:
if control.should_evaluate:
_a = deepcopy(snake_case_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def _lowercase ( ):
_a = get_args()
set_seed(args.seed )
_a = load_dataset("codeparrot/codecomplex", split="train" )
_a = dataset.train_test_split(test_size=0.2 )
_a = train_test["test"].train_test_split(test_size=0.5 )
_a = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
_a = AutoTokenizer.from_pretrained(args.model_ckpt )
_a = tokenizer.eos_token
_a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
_a = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_a = False
_a = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(lowerCamelCase__ : Tuple ):
_a = tokenizer(example["src"], truncation=lowerCamelCase__, max_length=1_024 )
        _a = labels.str2int(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_a = train_test_validation.map(
lowerCamelCase__, batched=lowerCamelCase__, remove_columns=train_test_validation["train"].column_names, )
_a = DataCollatorWithPadding(tokenizer=lowerCamelCase__ )
_a = TrainingArguments(
output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
_a = Trainer(
model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, )
print("Training..." )
trainer.add_callback(CustomCallback(lowerCamelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
| 691 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__snake_case : Tuple = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[str], lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict ):
for attribute in key.split("." ):
_a = getattr(__a, __a )
if weight_type is not None:
_a = getattr(__a, __a ).shape
else:
_a = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
else:
_a = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Any ):
_a = []
_a = fairseq_model.state_dict()
_a = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
_a = None
for name, value in fairseq_dict.items():
_a = False
if "conv_layers" in name:
load_conv_layer(
__a, __a, __a, __a, hf_model.config.feat_extract_norm == "group", )
_a = True
elif name.split("." )[0] == "proj":
_a = fairseq_model.proj
_a = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_a = True
if "*" in mapped_key:
_a = name.split(__a )[0].split("." )[-2]
_a = mapped_key.replace("*", __a )
if "weight_g" in name:
_a = "weight_g"
elif "weight_v" in name:
_a = "weight_v"
elif "bias" in name:
_a = "bias"
elif "weight" in name:
_a = "weight"
else:
_a = None
set_recursively(__a, __a, __a, __a, __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F'''Unused weights: {unused_weights}''' )
return proj_weight
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Tuple, lowerCamelCase__ : Dict, lowerCamelCase__ : int ):
_a = full_name.split("conv_layers." )[-1]
_a = name.split("." )
_a = int(items[0] )
_a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_a = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_a = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_a = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_a = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__a )
def _lowercase ( lowerCamelCase__ : int ):
_a , _a = emb.weight.shape
_a = nn.Linear(__a, __a, bias=__a )
_a = emb.weight.data
return lin_layer
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
with open(__a, "r", encoding="utf-8" ) as f:
_a = f.readlines()
_a = [line.split(" " )[0] for line in lines]
_a = len(__a )
_a = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(__a, range(4, num_words + 4 ) ) ) )
return vocab_dict
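# e.g. a fairseq dict file whose first two entries are "the" and "a" yields
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "the": 4, "a": 5}.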
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : Optional[int], lowerCamelCase__ : int, lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict, lowerCamelCase__ : Dict, lowerCamelCase__ : str, lowerCamelCase__ : List[str], ):
_a = WavaVecaConfig.from_pretrained(__a )
_a = SpeechaTextaConfig.from_pretrained(
__a, vocab_size=__a, decoder_layers=__a, do_stable_layer_norm=__a )
_a = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=__a, return_attention_mask=__a, )
_a , _a , _a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
_a = model[0].eval()
# set weights for wav2vec2 encoder
_a = WavaVecaModel(__a )
_a = recursively_load_weights_wavaveca(model.encoder, __a )
_a = SpeechaTextaForCausalLM(__a )
_a , _a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=__a )
# set output linear layer
unexpected_keys.remove("embed_out" )
_a = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
_a = SpeechEncoderDecoderModel(encoder=__a, decoder=__a )
_a = False
# add projection layer
_a = nn.Parameter(projection_layer.weight )
_a = nn.Parameter(projection_layer.bias )
_a = create_vocab_dict(__a )
with open(os.path.join(__a, "vocab.json" ), "w" ) as fp:
json.dump(__a, __a )
_a = SpeechaTextaTokenizer(os.path.join(__a, "vocab.json" ) )
tokenizer.save_pretrained(__a )
_a = hf_wavavec.config.to_dict()
_a = tokenizer.pad_token_id
_a = tokenizer.bos_token_id
_a = tokenizer.eos_token_id
_a = "speech_to_text_2"
_a = "wav2vec2"
_a = SpeechEncoderDecoderConfig.from_dict(__a )
hf_wavavec.save_pretrained(__a )
feature_extractor.save_pretrained(__a )
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
__snake_case : Optional[Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
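    # A hypothetical invocation (all paths are placeholders):
    #   python convert_wav2vec2_seq2seq_checkpoint.py \
    #       --checkpoint_path ./s2t_wav2vec2.pt --dict_path ./dict.txt \
    #       --pytorch_dump_folder_path ./wav2vec2-s2t-hf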
| 720 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict, lowerCamelCase__ : List[str] ):
_a = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_a = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_a = F'''{src_lang}-{tgt_lang}'''
_a = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ )
_a = os.path.join(lowerCamelCase__, "README.md" )
print(F'''Generating {path}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(lowerCamelCase__ )
# make sure we are under the root of the project
__snake_case : int = Path(__file__).resolve().parent.parent.parent
__snake_case : int = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__snake_case : Any = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
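# Running this script writes model_cards/allenai/<model_name>/README.md for each of
# the three distilled WMT16 en-de checkpoints listed above.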
| 691 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class A ( _snake_case ):
__UpperCAmelCase : jnp.ndarray
__UpperCAmelCase : jnp.ndarray
class A ( nn.Module ):
__UpperCAmelCase : int
__UpperCAmelCase : Tuple[int] = (16, 32, 96, 256)
__UpperCAmelCase : jnp.dtype = jnp.floataa
def __lowerCAmelCase ( self ) -> List[str]:
_a = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_a = []
for i in range(len(self.block_out_channels ) - 1 ):
_a = self.block_out_channels[i]
_a = self.block_out_channels[i + 1]
_a = nn.Conv(
snake_case_ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case_ )
_a = nn.Conv(
snake_case_ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case_ )
_a = blocks
_a = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case_ ) -> Any:
_a = self.conv_in(snake_case_ )
_a = nn.silu(snake_case_ )
for block in self.blocks:
_a = block(snake_case_ )
_a = nn.silu(snake_case_ )
_a = self.conv_out(snake_case_ )
return embedding
@flax_register_to_config
class A ( nn.Module , _snake_case , _snake_case ):
__UpperCAmelCase : int = 32
__UpperCAmelCase : int = 4
__UpperCAmelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__UpperCAmelCase : Union[bool, Tuple[bool]] = False
__UpperCAmelCase : Tuple[int] = (320, 640, 1280, 1280)
__UpperCAmelCase : int = 2
__UpperCAmelCase : Union[int, Tuple[int]] = 8
__UpperCAmelCase : Optional[Union[int, Tuple[int]]] = None
__UpperCAmelCase : int = 1280
__UpperCAmelCase : float = 0.0
__UpperCAmelCase : bool = False
__UpperCAmelCase : jnp.dtype = jnp.floataa
__UpperCAmelCase : bool = True
__UpperCAmelCase : int = 0
__UpperCAmelCase : str = "rgb"
__UpperCAmelCase : Tuple[int] = (16, 32, 96, 256)
def __lowerCAmelCase ( self , snake_case_ ) -> int:
# init input tensors
_a = (1, self.in_channels, self.sample_size, self.sample_size)
_a = jnp.zeros(snake_case_ , dtype=jnp.floataa )
_a = jnp.ones((1,) , dtype=jnp.intaa )
_a = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_a = (1, 3, self.sample_size * 8, self.sample_size * 8)
_a = jnp.zeros(snake_case_ , dtype=jnp.floataa )
_a = jax.random.split(snake_case_ )
_a = {"params": params_rng, "dropout": dropout_rng}
return self.init(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )["params"]
def __lowerCAmelCase ( self ) -> Dict:
_a = self.block_out_channels
_a = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_a = self.num_attention_heads or self.attention_head_dim
# input
_a = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_a = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_a = FlaxTimestepEmbedding(snake_case_ , dtype=self.dtype )
_a = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
_a = self.only_cross_attention
if isinstance(snake_case_ , snake_case_ ):
_a = (only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case_ , snake_case_ ):
_a = (num_attention_heads,) * len(self.down_block_types )
# down
_a = []
_a = []
_a = block_out_channels[0]
_a = nn.Conv(
snake_case_ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case_ )
for i, down_block_type in enumerate(self.down_block_types ):
_a = output_channel
_a = block_out_channels[i]
_a = i == len(snake_case_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_a = FlaxCrossAttnDownBlockaD(
in_channels=snake_case_ , out_channels=snake_case_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
_a = FlaxDownBlockaD(
in_channels=snake_case_ , out_channels=snake_case_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case_ )
for _ in range(self.layers_per_block ):
_a = nn.Conv(
snake_case_ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case_ )
if not is_final_block:
_a = nn.Conv(
snake_case_ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case_ )
_a = down_blocks
_a = controlnet_down_blocks
# mid
_a = block_out_channels[-1]
_a = FlaxUNetMidBlockaDCrossAttn(
in_channels=snake_case_ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
_a = nn.Conv(
snake_case_ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = 1.0 , snake_case_ = True , snake_case_ = False , ) -> Optional[Any]:
_a = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
_a = jnp.flip(snake_case_ , axis=1 )
# 1. time
if not isinstance(snake_case_ , jnp.ndarray ):
_a = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(snake_case_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
_a = timesteps.astype(dtype=jnp.floataa )
_a = jnp.expand_dims(snake_case_ , 0 )
_a = self.time_proj(snake_case_ )
_a = self.time_embedding(snake_case_ )
# 2. pre-process
_a = jnp.transpose(snake_case_ , (0, 2, 3, 1) )
_a = self.conv_in(snake_case_ )
_a = jnp.transpose(snake_case_ , (0, 2, 3, 1) )
_a = self.controlnet_cond_embedding(snake_case_ )
sample += controlnet_cond
# 3. down
_a = (sample,)
for down_block in self.down_blocks:
if isinstance(snake_case_ , snake_case_ ):
_a = down_block(snake_case_ , snake_case_ , snake_case_ , deterministic=not train )
else:
_a = down_block(snake_case_ , snake_case_ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
_a = self.mid_block(snake_case_ , snake_case_ , snake_case_ , deterministic=not train )
        # 5. controlnet blocks
_a = ()
for down_block_res_sample, controlnet_block in zip(snake_case_ , self.controlnet_down_blocks ):
_a = controlnet_block(snake_case_ )
controlnet_down_block_res_samples += (down_block_res_sample,)
_a = controlnet_down_block_res_samples
_a = self.controlnet_mid_block(snake_case_ )
# 6. scaling
_a = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=snake_case_ , mid_block_res_sample=snake_case_ )
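# Note: the conditioning image enters at pixel resolution (sample_size * 8 per side,
# matching the dummy tensor in the init above) and the stride-2 convolutions of the
# conditioning embedding downsample it to the UNet's latent grid before it is added
# to `sample`.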
| 721 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
__snake_case : str = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
__snake_case : Dict = {
"RUCAIBox/mvp": 1024,
}
class A ( a ):
__UpperCAmelCase : int = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[Any] = MvpTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]:
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = getattr(snake_case_ , pre_tok_state.pop("type" ) )
_a = add_prefix_space
_a = pre_tok_class(**snake_case_ )
_a = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_a = "post_processor"
_a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state["sep"] )
if "cls" in state:
_a = tuple(state["cls"] )
_a = False
if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get("trim_offsets" , snake_case_ ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(snake_case_ , state.pop("type" ) )
_a = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
_a = value
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Optional[Any]:
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
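# Like the BART fast tokenizer it mirrors, this class keeps `add_prefix_space` and
# `trim_offsets` consistent between the backend pre-tokenizer and post-processor,
# rebuilding the post-processor when the requested settings differ.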
| 691 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__snake_case : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class A ( lowercase__ ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Any:
super().__init__()
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=UpperCAmelCase__ , speech_processor=UpperCAmelCase__ , vae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , )
def __lowerCAmelCase ( self , snake_case_ = "auto" ) -> Tuple:
if slice_size == "auto":
_a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.enable_attention_slicing(UpperCAmelCase__ )
@torch.no_grad()
    def __call__( self , audio , sampling_rate=1_6_0_0_0 , height = 5_1_2 , width = 5_1_2 , num_inference_steps = 5_0 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> Optional[Any]:
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors="pt" , sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs , max_length=4_8_0_0_0_0 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True )[
            0
        ]
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(prompt )}''' )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(callback_steps )}.''' )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    F'''`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !='''
                    F''' {type(prompt )}.''' )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    F'''`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:'''
                    F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    " the batch size of `prompt`." )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding="max_length" , max_length=max_length , truncation=True , return_tensors="pt" , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1 , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape , generator=generator , device="cpu" , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            latents = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )
        latents = 1 / 0.18_215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
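# Usage sketch (added): assembling the pipeline above from a Whisper checkpoint
# and a Stable Diffusion checkpoint, following the diffusers community
# "speech_to_image_diffusion" example. The checkpoint names and 16 kHz audio are
# assumptions for illustration, not requirements of the class.
def _demo_speech_to_image(audio_array, sampling_rate=1_6_0_0_0):
    from transformers import WhisperForConditionalGeneration, WhisperProcessor
    from diffusers import StableDiffusionPipeline
    speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
    speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
    sd = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
    pipe = A(
        speech_model=speech_model,
        speech_processor=speech_processor,
        vae=sd.vae,
        text_encoder=sd.text_encoder,
        tokenizer=sd.tokenizer,
        unet=sd.unet,
        scheduler=sd.scheduler,
        safety_checker=sd.safety_checker,
        feature_extractor=sd.feature_extractor,
    )
    return pipe(audio_array, sampling_rate=sampling_rate).images[0]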
| 700 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 12_8022
FR_CODE = 12_8028
@require_sentencepiece
class A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seqaseq = False
    test_sentencepiece = True
    def setUp( self ) -> Any:
super().setUp()
_a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_a = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **snake_case_ ) -> str:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
return (
"This is a test",
"This is a test",
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "</s>"
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "</s>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "<s>" )
        self.assertEqual(len(vocab_keys ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __lowerCAmelCase ( self ) -> Any:
pass
def __lowerCAmelCase ( self ) -> Dict:
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2, 3, 4, 5, 6] , )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(back_tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        text = tokenizer.convert_tokens_to_string(back_tokens )
        self.assertEqual(text , "This is a test" )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
# fmt: off
_a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
__UpperCAmelCase : Any = """facebook/m2m100_418M"""
__UpperCAmelCase : Dict = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
__UpperCAmelCase : Optional[Any] = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
__UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def __lowerCAmelCase ( cls ) -> int:
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
        cls.pad_token_id = 1
        return cls
def __lowerCAmelCase ( self ) -> Any:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab["<unk>"] , 3 )
        self.assertIn(self.tokenizer.get_lang_token("en" ) , vocab )
def __lowerCAmelCase ( self ) -> List[str]:
_a = "en"
_a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_french = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_french )
        self.assertNotIn(self.tokenizer.eos_token , result )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.lang_token_to_id , original_lang_token_to_id )
@require_torch
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "en"
_a = "fr"
_a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" )
_a = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_a = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_a = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_a = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowerCAmelCase ( self ) -> int:
        inputs = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
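# Usage sketch (added): end-to-end translation with this tokenizer plus the
# matching model. The model class name follows this file's MaMaaa naming; the
# canonical transformers name is M2M100ForConditionalGeneration. Network required.
def _demo_translate_en_fr():
    from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaForConditionalGeneration
    tok = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    batch = tok("Hello world", return_tensors="pt")
    generated = model.generate(**batch, forced_bos_token_id=tok.get_lang_id("fr"))
    return tok.batch_decode(generated, skip_special_tokens=True)[0]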
| 691 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : Optional[Any] = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class A ( PretrainedConfig ):
    model_type = "luke"
    def __init__( self , vocab_size=5_0_2_6_7 , entity_vocab_size=5_0_0_0_0_0 , hidden_size=7_6_8 , entity_emb_size=2_5_6 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , use_entity_aware_attention=True , classifier_dropout=None , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> Dict:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
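# Usage sketch (added): instantiating the config above with one override; these
# are plain defaults, no pretrained weights are downloaded.
_cfg = A(entity_emb_size=1_2_8 )
assert _cfg.model_type == "luke"
assert _cfg.hidden_size == 7_6_8 and _cfg.entity_emb_size == 1_2_8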
| 701 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : int = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A ( PretrainedConfig ):
    model_type = "wav2vec2"
    def __init__( self , vocab_size=3_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(1_0, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_2_8 , num_conv_pos_embedding_groups=1_6 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=1_0 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=1_0 , mask_feature_min_masks=0 , num_codevectors_per_group=3_2_0 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_0_0 , codevector_dim=2_5_6 , proj_codevector_dim=2_5_6 , diversity_loss_weight=0.1 , ctc_loss_reduction="sum" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_5_6 , tdnn_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=5_1_2 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , adapter_attn_dim=None , **kwargs , ) -> List[str]:
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
# ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
_a = xvector_output_dim
@property
    def inputs_to_logits_ratio( self ):
        return functools.reduce(operator.mul , self.conv_stride , 1 )
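# Worked example (added): `inputs_to_logits_ratio` multiplies the conv strides,
# i.e. the waveform-to-frame downsampling factor. With the default strides
# (5, 2, 2, 2, 2, 2, 2) this is 5 * 2**6 = 320: one encoder frame per 320
# input samples, or 20 ms of audio at 16 kHz.
assert functools.reduce(operator.mul , (5, 2, 2, 2, 2, 2, 2) , 1 ) == 3_2_0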
| 691 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ) -> Dict:
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
_a = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
_a = CLIPTextModel(__lowerCamelCase )
_a = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_a = 7_7
_a = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ) -> Optional[Any]:
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
_a = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ) -> str:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __lowerCAmelCase ( self ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __lowerCAmelCase ( self ) -> Tuple:
_a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a = self.get_dummy_components()
torch.manual_seed(0 )
_a = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
_a = RobertaSeriesModelWithTransformation(__lowerCamelCase )
_a = text_encoder
_a = AltDiffusionPipeline(**__lowerCamelCase )
_a = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
_a = self.get_dummy_inputs(__lowerCamelCase )
_a = '''A photo of an astronaut'''
_a = alt_pipe(**__lowerCamelCase )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_a = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a = self.get_dummy_components()
_a = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
torch.manual_seed(0 )
_a = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
_a = RobertaSeriesModelWithTransformation(__lowerCamelCase )
_a = text_encoder
_a = AltDiffusionPipeline(**__lowerCamelCase )
_a = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
_a = self.get_dummy_inputs(__lowerCamelCase )
_a = alt_pipe(**__lowerCamelCase )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_a = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> List[str]:
_a = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=__lowerCamelCase )
_a = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = alt_pipe([prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2_0 , output_type="np" )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_a = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> Any:
_a = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
_a = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase )
_a = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = alt_pipe([prompt] , generator=__lowerCamelCase , num_inference_steps=2 , output_type="numpy" )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_a = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
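# Note (added): the `np.abs(a - b).max() < 1e-2` pattern used throughout these
# tests is an elementwise max-abs-difference tolerance check over the sampled
# corner slice; a standalone equivalent for reference:
def _max_abs_close(a, b, tol=1E-2):
    return float(np.abs(np.asarray(a ) - np.asarray(b )).max() ) < tol

assert _max_abs_close([0.40, 0.41], [0.401, 0.409] )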
| 702 |
'''simple docstring'''
def set_bit ( number : int, position : int ):
    return number | (1 << position)
def clear_bit ( number : int, position : int ):
    return number & ~(1 << position)
def flip_bit ( number : int, position : int ):
    return number ^ (1 << position)
def is_bit_set ( number : int, position : int ):
    return ((number >> position) & 1) == 1
def get_bit ( number : int, position : int ):
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
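# Usage sketch (added): the five helpers above applied to 0b1010 (= 10).
assert set_bit(0b1010, 0) == 0b1011
assert clear_bit(0b1010, 1) == 0b1000
assert flip_bit(0b1010, 3) == 0b0010
assert is_bit_set(0b1010, 1) is True
assert get_bit(0b1010, 0) == 0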
| 691 | 0 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/transformers" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i : i.created_at, reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
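# Worked sketch (added): the "close" inactivity window above as a standalone
# predicate (it ignores the label and bot-comment checks), testable without the
# GitHub API.
from datetime import timedelta

def _should_close(updated_at, created_at, now):
    return (now - updated_at).days > 7 and (now - created_at).days >= 30

_now = dt.utcnow()
assert _should_close(_now - timedelta(days=8), _now - timedelta(days=40), _now)
assert not _should_close(_now - timedelta(days=3), _now - timedelta(days=40), _now)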
| 703 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field ( input_text , convert_value=None , default=None , error_message=None ):
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )
def _ask_options ( prompt , options=[] , convert_value=None , default_choice=0 ):
    menu = BulletMenu(prompt , options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result
def _convert_compute_environment ( value ):
    value = int(value )
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _convert_distributed_mode ( value ):
    value = int(value )
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _convert_dynamo_backend ( value ):
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _convert_mixed_precision ( value ):
    value = int(value )
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _convert_sagemaker_distributed_mode ( value ):
    value = int(value )
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _convert_yes_no_to_bool ( value ):
    return {"yes": True, "no": False}[value.lower()]
class A ( argparse.RawDescriptionHelpFormatter ):
    def _format_usage( self , usage , actions , groups , prefix ) -> int:
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace("<command> [<args>] " , "" )
        return usage
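# Usage sketch (added): chaining the helpers above the way the accelerate config
# wizard does -- a validated yes/no question with a typed default. Interactive:
# it blocks on input(), so it is wrapped in a function here.
def _demo_prompt():
    return _ask_field(
        "Do you wish to use mixed precision? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )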
| 691 | 0 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__snake_case : Any = "true"
def get_basic_setup ( accelerator , num_samples=82 , batch_size=16 ):
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def get_dataloader ( accelerator , use_longest=False ):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
    dataset = load_dataset("glue" , "mrpc" , split="validation" )
    def tokenize_function(examples ):
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
        tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )
        return tokenizer.pad(examples , padding="max_length" , max_length=128 , return_tensors="pt" )
    return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
def get_mrpc_setup ( dispatch_batches , split_batches ):
    accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
    dataloader = get_dataloader(accelerator , not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased" , return_dict=True )
    ddp_model, ddp_dataloader = accelerator.prepare(model , dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions ( model , dataloader , accelerator ):
    logits_and_targets = []
    for batch in dataloader:
        inputs , targets = batch.values()
        with torch.no_grad():
            logit = model(inputs )
        logit , target = accelerator.gather_for_metrics((logit, targets) )
        logits_and_targets.append((logit, target) )
    logits , targs = [] , []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits , targs = torch.cat(logits ) , torch.cat(targs )
    return logits, targs
def test_torch_metrics ( accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ):
    model, ddp_model, dataloader = get_basic_setup(accelerator , num_samples , batch_size )
    logits, targs = generate_predictions(ddp_model , dataloader , accelerator )
    assert (
        len(logits ) == num_samples
    ), F'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}'''
def test_mrpc ( dispatch_batches = False , split_batches = False ):
    metric = evaluate.load("glue" , "mrpc" )
    setup, accelerator = get_mrpc_setup(dispatch_batches , split_batches )
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds , references=batch["labels"] )
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds , references=references )
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def main():
    accelerator = Accelerator(split_batches=False , dispatch_batches=False )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**" )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
                test_mrpc(dispatch_batches , split_batches )
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**" )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
            if accelerator.is_local_main_process:
                print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
            test_torch_metrics(accelerator , 99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**" )
    accelerator = Accelerator()
    test_torch_metrics(accelerator , 512 )
    accelerator.state._reset_state()
def _mp_fn ( index ):
    main()
if __name__ == "__main__":
main()
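# Usage note (added): this script is intended to run under the accelerate
# launcher, e.g. `accelerate launch --num_processes 2 test_metrics.py`, so that
# gather_for_metrics() actually has duplicated final batches to deduplicate.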
| 704 |
'''simple docstring'''
def simplify ( current_set: list[list] ):
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set ):
        magnitude = row[0]
        for column_index, column in enumerate(row ):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row )
            continue
        for column_index in range(len(row ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(temp_row )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        resultant = simplify(next_iteration )
        for i in range(len(resultant ) ):
            resultant[i].insert(0, current_first_column[i] )
        resultant.insert(0, current_first_row )
        final_set = resultant
    return final_set
def solve_simultaneous ( equations: list[list] ):
    if len(equations ) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1" )
    _length = len(equations ) + 1
    if any(len(item ) != _length for item in equations ):
        raise IndexError("solve_simultaneous() requires n lists of length n+1" )
    for row in equations:
        if any(not isinstance(column , (int, float) ) for column in row ):
            raise ValueError("solve_simultaneous() requires lists of integers" )
    if len(equations ) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set ):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data ):
            if 0 not in row:
                full_row = data_set.pop(row_index )
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation" )
        data_set.insert(0, full_row )
    useable_form = data_set.copy()
    simplified = simplify(useable_form )
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0 )
                continue
            solutions.append(current_solution / row[-2] )
            continue
        temp_row = row.copy()[: len(row ) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0 )
        if len(temp_row ) == 0:
            solutions.append(0 )
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row ):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution )
    final = []
    for item in solutions:
        final.append(float(round(item , 5 ) ) )
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case : Tuple = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
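    # Expected output (added), assuming the reconstruction above:
    #   [-1.0, 0.0, 1.0, 2.0, 3.0]
    #   [0.5]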
| 691 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> List[Any]:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , unk_token="<unk>" )
        tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
_a = "this is a test"
_a = "this is a test"
return input_text, output_text
def __lowerCAmelCase ( self ) -> Dict:
_a = "<pad>"
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(A_ ) , 3_0_0_0_1 )
def __lowerCAmelCase ( self ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def __lowerCAmelCase ( self ) -> List[str]:
# fmt: off
_a = " \tHeLLo!how \n Are yoU? "
_a = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
_a = DebertaVaTokenizer(A_ , do_lower_case=A_ )
_a = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
_a = DebertaVaTokenizerFast(A_ , do_lower_case=A_ )
_a = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __lowerCAmelCase ( self ) -> Tuple:
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __lowerCAmelCase ( self ) -> List[str]:
pass
def __lowerCAmelCase ( self ) -> int:
# fmt: off
_a = "I was born in 92000, and this is falsé."
_a = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
_a = DebertaVaTokenizer(A_ , split_by_punct=A_ )
_a = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
_a = DebertaVaTokenizerFast(A_ , split_by_punct=A_ )
_a = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# fmt: off
_a = "I was born in 92000, and this is falsé."
_a = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
_a = DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
_a = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
_a = DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
_a = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def __lowerCAmelCase ( self ) -> Any:
# fmt: off
_a = "I was born in 92000, and this is falsé."
_a = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
_a = DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
_a = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
_a = DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
_a = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def __lowerCAmelCase ( self ) -> int:
# fmt: off
_a = "I was born in 92000, and this is falsé."
_a = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
_a = DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
_a = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
_a = DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
_a = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def __lowerCAmelCase ( self ) -> Dict:
# fmt: off
_a = " \tHeLLo!how \n Are yoU? "
_a = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
_a = DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
_a = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
_a = DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
_a = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.get_tokenizer()
_a = self.get_rust_tokenizer()
_a = "I was born in 92000, and this is falsé."
_a = tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
_a = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
_a = tokenizer.encode(A_ , add_special_tokens=A_ )
_a = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
_a = self.get_rust_tokenizer()
_a = tokenizer.encode(A_ )
_a = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "This is a test"
_a = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
_a = ["▁", "T", "his", "▁is", "▁a", "▁test"]
_a = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
_a = DebertaVaTokenizer(A_ , keep_accents=A_ )
_a = DebertaVaTokenizerFast(A_ , keep_accents=A_ )
_a = tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
_a = tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
_a = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(A_ , A_ )
_a = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
_a = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
_a = rust_tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(A_ , A_ )
# fmt: off
_a = "I was born in 92000, and this is falsé."
_a = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
_a = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
_a = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
_a = tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
_a = tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
_a = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(A_ , A_ )
_a = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
_a = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
_a = rust_tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(A_ , A_ )
def __lowerCAmelCase ( self ) -> List[Any]:
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB )
        text = tokenizer.encode("sequence builders" )
        text_a = tokenizer.encode("multi-sequence build" )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , encoded_sentence )
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , encoded_pair , )
@slow
def __lowerCAmelCase ( self ) -> Tuple:
# fmt: off
_a = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=_a , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
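# Usage sketch (added): the do_lower_case / split_by_punct switches exercised
# above, on the real checkpoint from the slow test (network required).
def _demo_deberta_tokenize():
    tok = DebertaVaTokenizer.from_pretrained("microsoft/deberta-v2-xlarge", split_by_punct=True)
    return tok.tokenize("I was born in 92000, and this is falsé." )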
| 705 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum ( x ): # picklable for multiprocessing
    return x.sum()
def add_one ( i ): # picklable for multiprocessing
    return i + 1
@dataclass
class A :
    x : int
    y : str
class PyUtilsTest ( TestCase ):
def __lowerCAmelCase ( self ) -> Tuple:
_a = {}
_a = []
_a = 1
_a = [1, 2]
_a = {"a": 1, "b": 2}
_a = {"a": [1, 2], "b": [3, 4]}
_a = {"a": {"1": 1}, "b": 2}
_a = {"a": 1, "b": 2, "c": 3, "d": 4}
_a = {}
_a = []
_a = 2
_a = [2, 3]
_a = {"a": 2, "b": 3}
_a = {"a": [2, 3], "b": [4, 5]}
_a = {"a": {"1": 2}, "b": 3}
_a = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
_a = 2
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
_a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_a = {"a": 2, "b": 0, "c": 2}
_a = {
"a": np.eye(2 ).astype(snake_case_ ),
"b": np.zeros(3 ).astype(snake_case_ ),
"c": np.ones(2 ).astype(snake_case_ ),
}
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(snake_case_ ): # can't pickle a local lambda
map_nested(lambda snake_case_ : x + 1 , snake_case_ , num_proc=snake_case_ )
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"
        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {F'''{i}''': i for i in range(iterable_length )}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers
        model = layers.Dense(2)
        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()
        with temp_seed(4_2, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(4_2, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    @require_torch
    def test_temp_seed_torch(self):
        import torch
        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()
        with temp_seed(4_2, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(4_2, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)
        with temp_seed(4_2):
            out1 = gen_random_output()
        with temp_seed(4_2):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}] )
def _lowercase ( lowerCamelCase__ : Any ):
_a = NestedDataStructure(lowerCamelCase__ ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output
    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output
    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()
def _aseconds_generator_of_aitems_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it is yielded"
            out.append(content)
    assert out.count("a") == 2
    assert out.count("b") == 2
    assert len(out) == 4
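# A minimal sketch of `map_nested` outside the test harness; the nested structure
# below is illustrative only.
if __name__ == "__main__":
    nested = {"a": [1, 2], "b": {"c": 3}}
    doubled = map_nested(lambda value: value * 2, nested)  # applies the function to every leaf
    assert doubled == {"a": [2, 4], "b": {"c": 6}}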
| 691 | 0 |
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))
def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams)) | 706 |
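    # A small illustrative lookup, assuming the bundled "words.txt" contains the
    # usual English word list; the exact output depends on that file.
    print(anagram("listen"))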
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(__snake_case )
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__( self , vocab_size=None , is_encoder_decoder=True , prefix=None , bos_token_id=None , pad_token_id=None , eos_token_id=None , decoder_start_token_id=None , title_sep=" / " , doc_sep=" // " , n_docs=5 , max_combined_length=3_0_0 , retrieval_vector_size=7_6_8 , retrieval_batch_size=8 , dataset="wiki_dpr" , dataset_split="train" , index_name="compressed" , index_path=None , passages_path=None , use_dummy_dataset=False , reduce_loss=False , label_smoothing=0.0 , do_deduplication=True , exclude_bos_score=False , do_marginalize=False , output_retrieved=False , use_cache=True , forced_eos_token_id=None , **kwargs , ) -> None:
        super().__init__(
            bos_token_id=bos_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , is_encoder_decoder=is_encoder_decoder , prefix=prefix , vocab_size=vocab_size , **kwargs , )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")
        from ..auto.configuration_auto import AutoConfig
        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
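# A minimal construction sketch, assuming `transformers` is installed and this
# config is used through the public API; the checkpoint names are illustrative:
#
#     from transformers import AutoConfig, RagConfig
#     question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     generator = AutoConfig.from_pretrained("facebook/bart-large")
#     rag_config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator, n_docs=5)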
| 691 | 0 |
'''simple docstring'''
from __future__ import annotations
graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    def __init__( self , graph: dict[str, list[str]] , source_vertex: str ) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breath_first_search( self ) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)
    def shortest_path( self , target_vertex: str ) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + F'''->{target_vertex}'''
if __name__ == "__main__":
__snake_case : int = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
| 707 |
'''simple docstring'''
class Graph:
    def __init__( self ) -> None:
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ) -> None:
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ) -> None:
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ) -> None:
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ) -> str:
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F'''{head} -> {tail} == {weight}\n'''
        return string.rstrip("\n")
    def get_edges( self ) -> list:
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output
    def get_vertices( self ):
        return self.adjacency.keys()
    @staticmethod
    def build( vertices=None , edges=None ):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class UnionFind:
    def __init__( self ) -> None:
        self.parent = {}
        self.rank = {}
    def __len__( self ) -> int:
        return len(self.parent)
    def make_set( self , item ):
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item
    def find( self , item ):
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]
    def union( self , item1 , item2 ):
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None
def boruvka_mst( graph ):
    num_components = graph.num_vertices
    union_find = UnionFind()
    mst_edges = []
    while num_components > 1:
        cheap_edge = {}
        for vertex in graph.get_vertices():
            cheap_edge[vertex] = -1
        edges = graph.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for edge in edges:
            head, tail, weight = edge
            set1 = union_find.find(head)
            set2 = union_find.find(tail)
            if set1 != set2:
                if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                    cheap_edge[set1] = [head, tail, weight]
                if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                    cheap_edge[set2] = [head, tail, weight]
        for vertex in cheap_edge:
            if cheap_edge[vertex] != -1:
                head, tail, weight = cheap_edge[vertex]
                if union_find.find(head) != union_find.find(tail):
                    union_find.union(head, tail)
                    mst_edges.append(cheap_edge[vertex])
                    num_components = num_components - 1
    mst = Graph.build(edges=mst_edges)
    return mst
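# A short, self-contained usage sketch of the classes above; the triangle graph
# below is illustrative.
if __name__ == "__main__":
    example = Graph.build(
        vertices=["a", "b", "c"],
        edges=[("a", "b", 1), ("b", "c", 2), ("a", "c", 3)],
    )
    example.distinct_weight()  # Boruvka assumes distinct edge weights
    print(boruvka_mst(example))  # prints the minimum spanning tree edges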
| 691 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
"tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 708 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case : Tuple = "\\n Text data.\n Second line of data."
__snake_case : int = "file"
@pytest.fixture(scope="session" )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_a = bytes(lowerCamelCase__, "utf-8" )
with zstd.open(lowerCamelCase__, "wb" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] )
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict ):
_a = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_a = input_paths[compression_format]
_a = tmp_path / "cache"
_a = DownloadConfig(cache_dir=lowerCamelCase__, extract_compressed_file=lowerCamelCase__ )
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
with open(lowerCamelCase__ ) as f:
_a = f.read()
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False] )
@pytest.mark.parametrize("default_cache_dir", [True, False] )
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ):
_a = "custom_cache"
_a = "custom_extracted_dir"
_a = tmp_path / "custom_extracted_path"
if default_extracted:
_a = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", lowerCamelCase__ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(lowerCamelCase__ ) )
_a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_a = xz_file
_a = (
DownloadConfig(extract_compressed_file=lowerCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=lowerCamelCase__ )
)
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
assert Path(lowerCamelCase__ ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(F'''tmp://{tmpfs_file}''' )
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( ):
with pytest.raises(lowerCamelCase__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
http_get("https://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
ftp_get("ftp://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
fsspec_get("s3://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
fsspec_head("s3://huggingface.co" )
| 691 | 0 |
from __future__ import annotations
import requests
valid_terms = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def get_subreddit_data(subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = F'''Invalid search term: {invalid_search_terms}'''
        raise ValueError(msg)
    response = requests.get(
        F'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''', headers={"User-agent": "A random string"}, )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 709 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, F'''checkpoints/{model_name}.pth''' )
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
        config = SamConfig(
            vision_config=vision_config, )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
        config = SamConfig(
            vision_config=vision_config, )
    state_dict = torch.load(checkpoint_path, map_location="cpu" )
    state_dict = replace_keys(state_dict )
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor )
    hf_model = SamModel(config )
    hf_model.load_state_dict(state_dict )
    hf_model = hf_model.to("cuda" )
    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True ).raw ).convert("RGB" )
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image ), return_tensors="pt" ).to("cuda" )
    with torch.no_grad():
        output = hf_model(**inputs )
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
        inputs = processor(
            images=np.array(raw_image ), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to("cuda" )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
        input_boxes = ((75, 275, 1_725, 850),)
        inputs = processor(images=np.array(raw_image ), input_boxes=input_boxes, return_tensors="pt" ).to("cuda" )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image ), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to("cuda" )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
    args = parser.parse_args()
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 691 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__( self , unet , scheduler ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , num_inference_steps: int = 1_0_0 , generator = None , audio_length_in_s: Optional[float] = None , return_dict: bool = True , ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                F'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
                F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
                F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
                " process." )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio , t ).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output , t , audio ).prev_sample
        audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
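# A minimal inference sketch, assuming `diffusers` is installed; the checkpoint
# name below is an illustrative public Dance Diffusion model.
#
#     from diffusers import DiffusionPipeline
#     pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
#     audio = output.audios[0]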
| 710 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.9_99 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ), max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__( self , num_train_timesteps: int = 1_0_0_0 , beta_start: float = 0.00_085 , beta_end: float = 0.012 , beta_schedule: str = "linear" , trained_betas = None , prediction_type: str = "epsilon" , timestep_spacing: str = "linspace" , steps_offset: int = 0 , ) -> None:
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
    def index_for_timestep( self , timestep , schedule_timesteps=None ) -> int:
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma( self ):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input( self , sample , timestep , ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep )
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps( self , num_inference_steps , device=None , num_train_timesteps=None , ) -> None:
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=np.float32 )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.float32 )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(np.float32 )
            timesteps -= 1
        else:
            raise ValueError(
                F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.''' )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        self.log_sigmas = torch.from_numpy(np.log(sigmas ) ).to(device )
        sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
        if str(device ).startswith("mps" ):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps ).to(device , dtype=torch.float32 )
        else:
            timesteps = torch.from_numpy(timesteps ).to(device )
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol ).to(device , dtype=timesteps.dtype )
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps] )
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
    def sigma_to_t( self , sigma ):
        # get log sigma
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0 , 1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape )
        return t
    @property
    def state_in_first_order( self ) -> bool:
        return self.sample is None
    def step( self , model_output , timestep , sample , return_dict = True , ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep )
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample" )
        else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self , original_samples , noise , timesteps , ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )
        step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__( self ) -> int:
        return self.config.num_train_timesteps
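# A minimal denoising-loop sketch showing how this scheduler is typically driven,
# assuming `torch` and a UNet-style `model(sample, t).sample` callable; every name
# below is a placeholder.
#
#     scheduler = KDPM2DiscreteScheduler()
#     scheduler.set_timesteps(25, device="cpu")
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = model(model_input, t).sample
#         sample = scheduler.step(noise_pred, t, sample).prev_sample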
| 691 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, F'''checkpoints/{model_name}.pth''' )
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
        config = SamConfig(
            vision_config=vision_config, )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
        config = SamConfig(
            vision_config=vision_config, )
    state_dict = torch.load(checkpoint_path, map_location="cpu" )
    state_dict = replace_keys(state_dict )
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor )
    hf_model = SamModel(config )
    hf_model.load_state_dict(state_dict )
    hf_model = hf_model.to("cuda" )
    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True ).raw ).convert("RGB" )
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image ), return_tensors="pt" ).to("cuda" )
    with torch.no_grad():
        output = hf_model(**inputs )
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
        inputs = processor(
            images=np.array(raw_image ), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to("cuda" )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
        input_boxes = ((75, 275, 1_725, 850),)
        inputs = processor(images=np.array(raw_image ), input_boxes=input_boxes, return_tensors="pt" ).to("cuda" )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image ), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to("cuda" )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
    args = parser.parse_args()
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 711 |
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours) )
def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
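# A short usage sketch: a 4-cycle is 2-colorable; the adjacency matrix below is
# illustrative.
if __name__ == "__main__":
    four_cycle = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(four_cycle, 2))  # -> [0, 1, 0, 1]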
| 691 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : int = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 712 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__( self , id_ ) -> None:
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}
    def __lt__( self , other ) -> bool:
        return self.key < other.key
    def __repr__( self ) -> str:
        return self.id
    def add_neighbor( self , vertex ) -> None:
        self.neighbors.append(vertex)
    def add_edge( self , vertex , weight ) -> None:
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
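    # A small illustrative run on a triangle graph; `connect` takes 1-based
    # endpoints into the vertex list, matching its definition above.
    triangle = [Vertex(i) for i in range(3)]
    connect(triangle, 1, 2, 1)
    connect(triangle, 2, 3, 2)
    connect(triangle, 1, 3, 3)
    print(prim(triangle, triangle[0]))  # -> [(2, 1), (3, 2)]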
| 691 | 0 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ) -> None:
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(vocab_keys ) , 1_0_0_2 )
    def test_vocab_size( self ) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 )
    def test_full_tokenizer( self ) -> None:
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_a = {'''input_ids''': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = _a  # the big "fmt: off" literal above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="xlm-roberta-base", revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3"
        )
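
# A quick end-to-end sketch of what these tests exercise (illustrative; it needs
# network access to fetch the checkpoint, and the ids are not asserted here):
#   tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   ids = tok.encode("Hello World!")                   # wraps with <s> ... </s>
#   text = tok.decode(ids, skip_special_tokens=True)   # "Hello World!"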
| 713 |
'''simple docstring'''
__snake_case : List[str] = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    """A pseudorandom number generator: seed -> (multiplier * seed + increment) % modulo."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
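
# Added note: a fixed seed makes the stream reproducible, which is handy in tests.
# Sketch:
#   lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
#   assert lcg.next_number() == (1664525 * 42 + 1013904223) % (2 << 31)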
if __name__ == "__main__":
    # Show the LCG in action; the constants are the classic Numerical Recipes choice.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
| 691 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
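
# Example invocation (paths are illustrative):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel/pytorch_model.bin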
| 714 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    # The original spells out one branch per weight_type ("weight", "weight_g",
    # "weight_v", "bias", the LSTM weights/biases, ...); getattr is equivalent
    # because every branch just assigns to `<weight_type>.data`.
    if weight_type is None:
        hf_pointer.data = value
    else:
        getattr(hf_pointer, weight_type).data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
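
# Quick illustration of the wildcard rules above (keys are illustrative):
#   should_ignore("encoder.model.2.foo", ["encoder.model.2.*"])  -> True  (prefix rule)
#   should_ignore("quantizer.vq.layers.3.bar", ["vq.*.bar"])     -> True  (".*." rule)
#   should_ignore("decoder.model.0.conv", ["lstm"])              -> False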
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    # NOTE: this condition was written as `model_name == "encodec_24khz" or
    # "encodec_32khz"`, which is always truthy; the membership test below is
    # the intended check.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)

                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
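
# Example invocation (paths are illustrative):
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf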
| 691 | 0 |
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose row-wise source data into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalise each column; weight 0 means lower is better, weight 1 higher."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
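
if __name__ == "__main__":
    # Illustrative demo (values invented): columns are price, mileage, year;
    # weight 0 = lower is better, weight 1 = higher is better.
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 0, 1]))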
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
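
# Added note: _LazyModule defers the heavy torch/tokenizers imports until an
# attribute is first accessed. A rough sketch of the idea (simplified; not the
# real implementation):
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._class_to_module = {
#               cls: mod for mod, classes in import_structure.items() for cls in classes
#           }
#
#       def __getattr__(self, attr):
#           module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
#           return getattr(module, attr)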
| 691 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
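
# Added note: with gradient accumulation the effective batch size is
# per_device_batch_size * gradient_accumulation_steps * num_processes,
# e.g. 16 * 4 * 2 GPUs = 128 samples per optimizer step.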
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/eval `DataLoader`s for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
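
# Example launches (the script name is illustrative; the flags are defined above):
#   python gradient_accumulation.py --gradient_accumulation_steps 4
#   accelerate launch gradient_accumulation.py --mixed_precision fp16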
| 716 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
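
# Added note: dummy objects like this let `import transformers` succeed when
# torch/scipy are not installed; the ImportError is deferred to first use, e.g.
#   obj = A()  # raises an ImportError explaining that torch and scipy are required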
| 691 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
# Processor in the BLIP-2 style: wraps a BlipImageProcessor and an AutoTokenizer.
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
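
# A minimal usage sketch (model id, image path and prompt are illustrative):
#   from PIL import Image
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")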
| 717 |
'''simple docstring'''
# Dependency version table: pip requirement strings keyed by package name.
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 691 | 0 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_dir = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_dir)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
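
# A minimal usage sketch inside a model's test file (BertConfig is illustrative):
#   class BertConfigTest(unittest.TestCase):
#       def setUp(self):
#           self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#       def test_config(self):
#           self.config_tester.run_common_tests()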
| 718 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    @require_torch
    def test_prophetnet_tokenizer_batch_encoding(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 691 | 0 |
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
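
# Examples: 8 == 0b1000, so its highest set bit sits at position 4 (1-indexed);
# for 0 no bit is set and the function returns 0.
#   get_highest_set_bit_position(8)  -> 4
#   get_highest_set_bit_position(0)  -> 0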
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
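
# Added note: the callback above re-runs evaluation on the *training* split after
# each epoch (metrics get a "train_" prefix), so train vs. validation accuracy
# can be compared epoch by epoch.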
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
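
# Example invocation (script name is illustrative; the flags are defined in get_args):
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5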
| 691 | 0 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
__snake_case : Union[str, Any] = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
__snake_case : Union[str, Any] = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
__snake_case : Tuple = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Any ):
return float((preds == labels).mean() )
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : str ):
_a = simple_accuracy(UpperCamelCase__, UpperCamelCase__ )
_a = float(fa_score(y_true=UpperCamelCase__, y_pred=UpperCamelCase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Any ):
_a = np.array(UpperCamelCase__ )
_a = np.array(UpperCamelCase__ )
_a = en_sentvecs.shape[0]
# mean centering
_a = en_sentvecs - np.mean(UpperCamelCase__, axis=0 )
_a = in_sentvecs - np.mean(UpperCamelCase__, axis=0 )
_a = cdist(UpperCamelCase__, UpperCamelCase__, "cosine" )
_a = np.array(range(UpperCamelCase__ ) )
_a = sim.argsort(axis=1 )[:, :10]
_a = np.any(preds == actual[:, None], axis=1 )
return float(matches.mean() )
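# In words: sentence vectors from both languages are mean-centred, every English
# vector is ranked against all in-language vectors by cosine distance, and a pair
# counts as correct when the aligned index lands among the 10 nearest neighbours,
# hence the name precision@10.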
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> Optional[Any]:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Tuple:
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(snake_case_ , snake_case_ )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(snake_case_ , snake_case_ )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(snake_case_ , snake_case_ )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
| 720 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict, lowerCamelCase__ : List[str] ):
_a = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
_a = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_a = F'''{src_lang}-{tgt_lang}'''
_a = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ )
_a = os.path.join(lowerCamelCase__, "README.md" )
print(F'''Generating {path}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(lowerCamelCase__ )
# make sure we are under the root of the project
__snake_case : int = Path(__file__).resolve().parent.parent.parent
__snake_case : int = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__snake_case : Any = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 691 | 0 |
'''simple docstring'''
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__snake_case : Optional[int] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
__snake_case : List[Any] = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
__snake_case : Tuple = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__snake_case : int = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
__snake_case : Tuple = "allenai"
def _lowercase ( lowerCamelCase__ : List[str] ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
_a = dict((re.sub(R"@@$", "", lowerCamelCase__ ), v) if k.endswith("@@" ) else (re.sub(R"$", "</w>", lowerCamelCase__ ), v) for k, v in d.items() )
_a = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[F'''{k}</w>''']
_a = d[k] # restore
return da
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Tuple ):
# prep
assert os.path.exists(lowerCamelCase__ )
os.makedirs(lowerCamelCase__, exist_ok=lowerCamelCase__ )
print(F'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
_a = basename(lowerCamelCase__ )
_a = dirname(lowerCamelCase__ )
_a = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
_a = cls.hub_models()
_a = {"bpe": "fastbpe", "tokenizer": "moses"}
_a = "."
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'''using checkpoint {checkpoint_file}''' )
_a = hub_utils.from_pretrained(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, archive_map=lowerCamelCase__, **lowerCamelCase__ )
_a = vars(chkpt["args"]["model"] )
_a = args["source_lang"]
_a = args["target_lang"]
_a = dirname(lowerCamelCase__ )
_a = basename(lowerCamelCase__ )
# dicts
_a = os.path.join(lowerCamelCase__, F'''dict.{src_lang}.txt''' )
_a = os.path.join(lowerCamelCase__, F'''dict.{tgt_lang}.txt''' )
_a = Dictionary.load(lowerCamelCase__ )
_a = rewrite_dict_keys(src_dict.indices )
_a = len(lowerCamelCase__ )
_a = os.path.join(lowerCamelCase__, "vocab-src.json" )
print(F'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(json.dumps(lowerCamelCase__, ensure_ascii=lowerCamelCase__, indent=lowerCamelCase__ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
_a = True
for k in src_vocab.keys():
if not k.islower():
_a = False
break
_a = Dictionary.load(lowerCamelCase__ )
_a = rewrite_dict_keys(tgt_dict.indices )
_a = len(lowerCamelCase__ )
_a = os.path.join(lowerCamelCase__, "vocab-tgt.json" )
print(F'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(json.dumps(lowerCamelCase__, ensure_ascii=lowerCamelCase__, indent=lowerCamelCase__ ) )
# merges_file (bpecodes)
_a = os.path.join(lowerCamelCase__, VOCAB_FILES_NAMES["merges_file"] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
_a = os.path.join(lowerCamelCase__, lowerCamelCase__ )
if os.path.exists(lowerCamelCase__ ):
break
with open(lowerCamelCase__, encoding="utf-8" ) as fin:
_a = fin.read()
_a = re.sub(R" \d+$", "", lowerCamelCase__, 0, re.M ) # remove frequency number
print(F'''Generating {merges_file}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as fout:
fout.write(lowerCamelCase__ )
# model config
_a = os.path.join(lowerCamelCase__, "config.json" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", F'''need to extend tokenizer to support bpe={args['tokenizer']}'''
_a = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
_a = 5
_a = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
_a = best_score_hparams[model_dir]["length_penalty"]
else:
_a = 1.0
print(F'''Generating {fsmt_model_config_file}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(json.dumps(lowerCamelCase__, ensure_ascii=lowerCamelCase__, indent=lowerCamelCase__ ) )
# tokenizer config
_a = os.path.join(lowerCamelCase__, lowerCamelCase__ )
_a = {
"langs": [src_lang, tgt_lang],
"model_max_length": 1_024,
"do_lower_case": do_lower_case,
}
print(F'''Generating {fsmt_tokenizer_config_file}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(json.dumps(lowerCamelCase__, ensure_ascii=lowerCamelCase__, indent=lowerCamelCase__ ) )
# model
_a = chkpt["models"][0]
_a = model.state_dict()
# rename keys to start with 'model.'
_a = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
_a = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase__, lowerCamelCase__ )
_a = FSMTConfig.from_pretrained(lowerCamelCase__ )
_a = FSMTForConditionalGeneration(lowerCamelCase__ )
# check that it loads ok
model_new.load_state_dict(lowerCamelCase__, strict=lowerCamelCase__ )
# save
_a = os.path.join(lowerCamelCase__, lowerCamelCase__ )
print(F'''Generating {pytorch_weights_dump_path}''' )
torch.save(lowerCamelCase__, lowerCamelCase__ )
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(F'''cd {data_root}''' )
print(F'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
__snake_case : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__snake_case : Tuple = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 721 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
__snake_case : str = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
__snake_case : Dict = {
"RUCAIBox/mvp": 1024,
}
class A ( a ):
__UpperCAmelCase : int = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[Any] = MvpTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]:
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = getattr(snake_case_ , pre_tok_state.pop("type" ) )
_a = add_prefix_space
_a = pre_tok_class(**snake_case_ )
_a = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_a = "post_processor"
_a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state["sep"] )
if "cls" in state:
_a = tuple(state["cls"] )
_a = False
if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get("trim_offsets" , snake_case_ ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(snake_case_ , state.pop("type" ) )
_a = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
_a = value
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Optional[Any]:
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
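# like BART and RoBERTa, MVP does not use token type ids, so the mask returned
# above is all zeros whether one or two sequences are passed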
| 691 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class A ( a ):
__UpperCAmelCase : "DiagonalGaussianDistribution"
class A ( a , a ):
__UpperCAmelCase : Optional[Any] = True
@register_to_config
def __init__( self , snake_case_ = 3 , snake_case_ = 3 , snake_case_ = ("DownEncoderBlock2D",) , snake_case_ = ("UpDecoderBlock2D",) , snake_case_ = (6_4,) , snake_case_ = 1 , snake_case_ = "silu" , snake_case_ = 4 , snake_case_ = 3_2 , snake_case_ = 3_2 , snake_case_ = 0.18_215 , ) -> Dict:
super().__init__()
# pass init params to Encoder
_a = Encoder(
in_channels=snake_case_ , out_channels=snake_case_ , down_block_types=snake_case_ , block_out_channels=snake_case_ , layers_per_block=snake_case_ , act_fn=snake_case_ , norm_num_groups=snake_case_ , double_z=snake_case_ , )
# pass init params to Decoder
_a = Decoder(
in_channels=snake_case_ , out_channels=snake_case_ , up_block_types=snake_case_ , block_out_channels=snake_case_ , layers_per_block=snake_case_ , norm_num_groups=snake_case_ , act_fn=snake_case_ , )
_a = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
_a = nn.Convad(snake_case_ , snake_case_ , 1 )
_a = False
_a = False
# only relevant if vae tiling is enabled
_a = self.config.sample_size
_a = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
_a = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
_a = 0.25
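# neighbouring tiles overlap by this fraction of their size so the seams can be
# blended away during tiled encoding and decoding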
def __lowerCAmelCase ( self , snake_case_ , snake_case_=False ) -> Any:
if isinstance(snake_case_ , (Encoder, Decoder) ):
_a = value
def __lowerCAmelCase ( self , snake_case_ = True ) -> List[str]:
_a = use_tiling
def __lowerCAmelCase ( self ) -> str:
self.enable_tiling(snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = True
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
_a = {}
def fn_recursive_add_processors(snake_case_ , snake_case_ , snake_case_ ):
if hasattr(snake_case_ , "set_processor" ):
_a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , snake_case_ , snake_case_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(snake_case_ , snake_case_ , snake_case_ )
return processors
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
_a = len(self.attn_processors.keys() )
if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(snake_case_ )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(snake_case_ , snake_case_ , snake_case_ ):
if hasattr(snake_case_ , "set_processor" ):
if not isinstance(snake_case_ , snake_case_ ):
module.set_processor(snake_case_ )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , snake_case_ , snake_case_ )
for name, module in self.named_children():
fn_recursive_attn_processor(snake_case_ , snake_case_ , snake_case_ )
def __lowerCAmelCase ( self ) -> int:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(snake_case_ , return_dict=snake_case_ )
if self.use_slicing and x.shape[0] > 1:
_a = [self.encoder(snake_case_ ) for x_slice in x.split(1 )]
_a = torch.cat(snake_case_ )
else:
_a = self.encoder(snake_case_ )
_a = self.quant_conv(snake_case_ )
_a = DiagonalGaussianDistribution(snake_case_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(snake_case_ , return_dict=snake_case_ )
_a = self.post_quant_conv(snake_case_ )
_a = self.decoder(snake_case_ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case_ )
@apply_forward_hook
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
_a = [self._decode(snake_case_ ).sample for z_slice in z.split(1 )]
_a = torch.cat(snake_case_ )
else:
_a = self._decode(snake_case_ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Union[str, Any]:
_a = min(a.shape[2] , b.shape[2] , snake_case_ )
for y in range(snake_case_ ):
_a = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
_a = min(a.shape[3] , b.shape[3] , snake_case_ )
for x in range(snake_case_ ):
_a = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
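# both helpers above cross-fade linearly over `blend_extent` rows (blend_v) or
# columns (blend_h), which is what hides the seams between adjacent tiles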
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = True ) -> AutoencoderKLOutput:
_a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
_a = int(self.tile_latent_min_size * self.tile_overlap_factor )
_a = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
_a = []
for i in range(0 , x.shape[2] , snake_case_ ):
_a = []
for j in range(0 , x.shape[3] , snake_case_ ):
_a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
_a = self.encoder(snake_case_ )
_a = self.quant_conv(snake_case_ )
row.append(snake_case_ )
rows.append(snake_case_ )
_a = []
for i, row in enumerate(snake_case_ ):
_a = []
for j, tile in enumerate(snake_case_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_a = self.blend_v(rows[i - 1][j] , snake_case_ , snake_case_ )
if j > 0:
_a = self.blend_h(row[j - 1] , snake_case_ , snake_case_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(snake_case_ , dim=3 ) )
_a = torch.cat(snake_case_ , dim=2 )
_a = DiagonalGaussianDistribution(snake_case_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
_a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
_a = int(self.tile_sample_min_size * self.tile_overlap_factor )
_a = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
_a = []
for i in range(0 , z.shape[2] , snake_case_ ):
_a = []
for j in range(0 , z.shape[3] , snake_case_ ):
_a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
_a = self.post_quant_conv(snake_case_ )
_a = self.decoder(snake_case_ )
row.append(snake_case_ )
rows.append(snake_case_ )
_a = []
for i, row in enumerate(snake_case_ ):
_a = []
for j, tile in enumerate(snake_case_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_a = self.blend_v(rows[i - 1][j] , snake_case_ , snake_case_ )
if j > 0:
_a = self.blend_h(row[j - 1] , snake_case_ , snake_case_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(snake_case_ , dim=3 ) )
_a = torch.cat(snake_case_ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = False , snake_case_ = True , snake_case_ = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
_a = sample
_a = self.encode(snake_case_ ).latent_dist
if sample_posterior:
_a = posterior.sample(generator=snake_case_ )
else:
_a = posterior.mode()
_a = self.decode(snake_case_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=snake_case_ )
| 700 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__snake_case : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__snake_case : Optional[Any] = 12_8022
__snake_case : List[str] = 12_8028
@require_sentencepiece
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = MaMaaaTokenizer
__UpperCAmelCase : int = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Tuple = True
def __lowerCAmelCase ( self ) -> Any:
super().setUp()
_a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_a = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self , **snake_case_ ) -> str:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
return (
"This is a test",
"This is a test",
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "</s>"
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.get_tokenizer()
_a = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(snake_case_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __lowerCAmelCase ( self ) -> Any:
pass
def __lowerCAmelCase ( self ) -> Dict:
_a = self.get_tokenizer()
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_ ) , [2, 3, 4, 5, 6] , )
_a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
_a = tokenizer.convert_tokens_to_string(snake_case_ )
self.assertEqual(snake_case_ , "This is a test" )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
# fmt: off
_a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
__UpperCAmelCase : Any = """facebook/m2m100_418M"""
__UpperCAmelCase : Dict = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
__UpperCAmelCase : Optional[Any] = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
__UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def __lowerCAmelCase ( cls ) -> int:
_a = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
_a = 1
return cls
def __lowerCAmelCase ( self ) -> Any:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.tokenizer.get_vocab()
self.assertEqual(len(snake_case_ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = "en"
_a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.assertIn(snake_case_ , self.tokenizer.all_special_ids )
# fmt: off
_a = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
_a = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
_a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertNotIn(self.tokenizer.eos_token , snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = tempfile.mkdtemp()
_a = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(snake_case_ )
_a = MaMaaaTokenizer.from_pretrained(snake_case_ )
self.assertDictEqual(new_tok.lang_token_to_id , snake_case_ )
@require_torch
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "en"
_a = "fr"
_a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" )
_a = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_a = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
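# i.e. the labels "<fr> ... </s>" are shifted right into decoder inputs that
# start with "</s> <fr> ...", matching the assertion above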
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_a = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_a = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowerCAmelCase ( self ) -> int:
_a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(snake_case_ ) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
| 691 | 0 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : int ):
_a = word.split()
def justify(lowerCamelCase__ : list, lowerCamelCase__ : int, lowerCamelCase__ : int ) -> str:
_a = max_width - width
_a = len(lowerCamelCase__ )
if len(lowerCamelCase__ ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
_a = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
_a = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
_a = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(lowerCamelCase__ ):
num_spaces_between_words_list[i] += 1
_a = []
for i in range(lowerCamelCase__ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * " " )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(lowerCamelCase__ )
_a = []
_a = []
_a = 0
for word in words:
if width + len(lowerCamelCase__ ) + len(lowerCamelCase__ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(lowerCamelCase__ )
width += len(lowerCamelCase__ )
else:
# justify the line and add it to result
answer.append(justify(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ) )
# reset new line and new width
_a , _a = [word], len(lowerCamelCase__ )
_a = max_width - width - len(lowerCamelCase__ )
answer.append(" ".join(lowerCamelCase__ ) + (remaining_spaces + 1) * " " )
return answer
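# Worked example (hypothetical call, since names are shortened in this file):
# justifying "This is an example of text justification." to max_width 16 yields
#   "This    is    an"
#   "example  of text"
#   "justification.  "
# with the leftover spaces handed out round-robin from the leftmost word gaps.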
if __name__ == "__main__":
from doctest import testmod
testmod()
| 701 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : int = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A ( a ):
__UpperCAmelCase : Union[str, Any] = """wav2vec2"""
def __init__( self , snake_case_=3_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(1_0, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=1_2_8 , snake_case_=1_6 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=1_0 , snake_case_=2 , snake_case_=0.0 , snake_case_=1_0 , snake_case_=0 , snake_case_=3_2_0 , snake_case_=2 , snake_case_=0.1 , snake_case_=1_0_0 , snake_case_=2_5_6 , snake_case_=2_5_6 , snake_case_=0.1 , snake_case_="sum" , snake_case_=False , snake_case_=False , snake_case_=2_5_6 , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=5_1_2 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=False , snake_case_=3 , snake_case_=2 , snake_case_=3 , snake_case_=None , snake_case_=None , **snake_case_ , ) -> List[str]:
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# adapter
_a = add_adapter
_a = adapter_kernel_size
_a = adapter_stride
_a = num_adapter_layers
_a = output_hidden_size or hidden_size
_a = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = xvector_output_dim
@property
def __lowerCAmelCase ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
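# the product of the conv strides is the model's total downsampling factor; with
# the default strides (5, 2, 2, 2, 2, 2, 2) this is 320, i.e. one encoder frame
# per 320 input samples (20 ms of 16 kHz audio)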
| 691 | 0 |
'''simple docstring'''
from __future__ import annotations
def _lowercase ( lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
_a = 0
_a = len(lowerCamelCase__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
_a = i + 1
else:
_a = j - 1
return []
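# note the implicit assumption: the two-pointer scan above is only correct when
# the input list is sorted in ascending order, e.g. [2, 7, 11, 15] with target 9
# returns [0, 1] because 2 + 7 == 9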
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
| 702 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number | (1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number & ~(1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number ^ (1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return ((number >> position) & 1) == 1
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return int((number & (1 << position)) != 0 )
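# worked examples with number = 0b1101 (13), positions counted from bit 0:
#   set bit 1    -> 0b1111 (15)
#   clear bit 2  -> 0b1001 (9)
#   flip bit 1   -> 0b1111 (15); flipping again restores 0b1101 (13)
#   bit 0 of 13 is set (True), bit 1 is not (False)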
if __name__ == "__main__":
import doctest
doctest.testmod()
| 691 | 0 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : Any ):
if isinstance(lowerCamelCase__, torch.Tensor ):
return image
elif isinstance(lowerCamelCase__, PIL.Image.Image ):
_a = [image]
if isinstance(image[0], PIL.Image.Image ):
_a = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
_a = np.concatenate(lowerCamelCase__, axis=0 )
_a = np.array(lowerCamelCase__ ).astype(np.floataa ) / 255.0
_a = image.transpose(0, 3, 1, 2 )
_a = 2.0 * image - 1.0
_a = torch.from_numpy(lowerCamelCase__ )
elif isinstance(image[0], torch.Tensor ):
_a = torch.cat(lowerCamelCase__, dim=0 )
return image
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str]=0.99_95 ):
if not isinstance(lowerCamelCase__, np.ndarray ):
_a = True
_a = va.device
_a = va.cpu().numpy()
_a = va.cpu().numpy()
_a = np.sum(va * va / (np.linalg.norm(lowerCamelCase__ ) * np.linalg.norm(lowerCamelCase__ )) )
if np.abs(lowerCamelCase__ ) > DOT_THRESHOLD:
_a = (1 - t) * va + t * va
else:
_a = np.arccos(lowerCamelCase__ )
_a = np.sin(lowerCamelCase__ )
_a = theta_a * t
_a = np.sin(lowerCamelCase__ )
_a = np.sin(theta_a - theta_t ) / sin_theta_a
_a = sin_theta_t / sin_theta_a
_a = sa * va + sa * va
if inputs_are_torch:
_a = torch.from_numpy(lowerCamelCase__ ).to(lowerCamelCase__ )
return va
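# maths behind the helper above: for vectors v0, v1 at angle
# theta = arccos(v0.v1 / (|v0||v1|)), slerp(t) =
# sin((1-t)*theta)/sin(theta) * v0 + sin(t*theta)/sin(theta) * v1,
# which interpolates along the great circle; when the vectors are nearly
# parallel (|dot| > DOT_THRESHOLD) it falls back to plain linear interpolation
# to avoid dividing by sin(theta) close to 0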
def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Tuple ):
_a = F.normalize(lowerCamelCase__, dim=-1 )
_a = F.normalize(lowerCamelCase__, dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
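# for unit vectors at angle theta, |x - y| = 2*sin(theta/2), so the expression
# above equals 2 * (theta/2)**2 = theta**2 / 2: half the squared angular distance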
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Optional[Any] ):
for param in model.parameters():
_a = value
class A ( a ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=None , ) -> List[Any]:
super().__init__()
self.register_modules(
vae=snake_case_ , text_encoder=snake_case_ , clip_model=snake_case_ , tokenizer=snake_case_ , unet=snake_case_ , scheduler=snake_case_ , feature_extractor=snake_case_ , coca_model=snake_case_ , coca_tokenizer=snake_case_ , coca_transform=snake_case_ , )
_a = (
feature_extractor.size
if isinstance(feature_extractor.size , snake_case_ )
else feature_extractor.size["shortest_edge"]
)
_a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , snake_case_ )
set_requires_grad(self.clip_model , snake_case_ )
def __lowerCAmelCase ( self , snake_case_ = "auto" ) -> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.enable_attention_slicing(snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
set_requires_grad(self.vae , snake_case_ )
def __lowerCAmelCase ( self ) -> Tuple:
set_requires_grad(self.vae , snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
set_requires_grad(self.unet , snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[Any]:
set_requires_grad(self.unet , snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> int:
# get the original timestep using init_timestep
_a = min(int(num_inference_steps * strength ) , snake_case_ )
_a = max(num_inference_steps - init_timestep , 0 )
_a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
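# standard img2img schedule truncation: with strength in [0, 1], only the final
# strength * num_inference_steps denoising steps are run, starting from a
# partially noised version of the init image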
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ) -> Optional[Any]:
if not isinstance(snake_case_ , torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(snake_case_ )}''' )
_a = image.to(device=snake_case_ , dtype=snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
_a = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case_ )
]
_a = torch.cat(snake_case_ , dim=0 )
else:
_a = self.vae.encode(snake_case_ ).latent_dist.sample(snake_case_ )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_a = 0.18_215 * init_latents
_a = init_latents.repeat_interleave(snake_case_ , dim=0 )
_a = randn_tensor(init_latents.shape , generator=snake_case_ , device=snake_case_ , dtype=snake_case_ )
# get latents
_a = self.scheduler.add_noise(snake_case_ , snake_case_ , snake_case_ )
_a = init_latents
return latents
def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]:
_a = self.coca_transform(snake_case_ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_a = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
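# CoCa serves as an image captioner here: when no text prompt is supplied, the
# generated caption (with its start/end markers stripped) stands in for the prompt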
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[Any]:
_a = self.feature_extractor.preprocess(snake_case_ )
_a = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
_a = self.clip_model.get_image_features(snake_case_ )
_a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=snake_case_ )
_a = image_embeddings_clip.repeat_interleave(snake_case_ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> str:
_a = latents.detach().requires_grad_()
_a = self.scheduler.scale_model_input(snake_case_ , snake_case_ )
# predict the noise residual
_a = self.unet(snake_case_ , snake_case_ , encoder_hidden_states=snake_case_ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_a = self.scheduler.alphas_cumprod[timestep]
_a = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_a = torch.sqrt(snake_case_ )
_a = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , snake_case_ ):
_a = self.scheduler.sigmas[index]
_a = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_a = 1 / 0.18_215 * sample
_a = self.vae.decode(snake_case_ ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = transforms.Resize(self.feature_extractor_size )(snake_case_ )
_a = self.normalize(snake_case_ ).to(latents.dtype )
_a = self.clip_model.get_image_features(snake_case_ )
_a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=snake_case_ )
_a = spherical_dist_loss(snake_case_ , snake_case_ ).mean() * clip_guidance_scale
_a = -torch.autograd.grad(snake_case_ , snake_case_ )[0]
if isinstance(self.scheduler , snake_case_ ):
_a = latents.detach() + grads * (sigma**2)
_a = noise_pred_original
else:
_a = noise_pred_original - torch.sqrt(snake_case_ ) * grads
return noise_pred, latents
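# summary of the guidance step above: decode the current denoising estimate to an
# image, embed it with CLIP, take the gradient of the scaled spherical distance to
# the target embedding with respect to the latents, and fold that gradient back
# into the predicted noise so sampling drifts toward the target CLIP embedding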
@torch.no_grad()
def __call__( self , snake_case_ , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = 5_1_2 , snake_case_ = 5_1_2 , snake_case_ = 0.6 , snake_case_ = 5_0 , snake_case_ = 7.5 , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = 1_0_0 , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , snake_case_ = 0.8 , snake_case_ = 0.1 , snake_case_ = 0.1 , ) -> List[str]:
if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(snake_case_ )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(snake_case_ , torch.Generator ) and batch_size > 1:
_a = [generator] + [None] * (batch_size - 1)
_a = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
_a = [x[0] for x in coca_is_none if x[1]]
_a = ", ".join(snake_case_ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(snake_case_ ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(snake_case_ )
if style_prompt is None:
if len(snake_case_ ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(snake_case_ )
# get prompt text embeddings for content and style
_a = self.tokenizer(
snake_case_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=snake_case_ , return_tensors="pt" , )
_a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_a = self.tokenizer(
snake_case_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=snake_case_ , return_tensors="pt" , )
_a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_a = slerp(snake_case_ , snake_case_ , snake_case_ )
# duplicate text embeddings for each generation per prompt
_a = text_embeddings.repeat_interleave(snake_case_ , dim=0 )
# set timesteps
_a = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_a = {}
if accepts_offset:
_a = 1
self.scheduler.set_timesteps(snake_case_ , **snake_case_ )
# Some schedulers like PNDM have timesteps as arrays
        # It is more efficient to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
_a , _a = self.get_timesteps(snake_case_ , snake_case_ , self.device )
_a = timesteps[:1].repeat(snake_case_ )
# Preprocess image
_a = preprocess(snake_case_ , snake_case_ , snake_case_ )
_a = self.prepare_latents(
snake_case_ , snake_case_ , snake_case_ , text_embeddings.dtype , self.device , snake_case_ )
_a = preprocess(snake_case_ , snake_case_ , snake_case_ )
_a = self.prepare_latents(
snake_case_ , snake_case_ , snake_case_ , text_embeddings.dtype , self.device , snake_case_ )
_a = slerp(snake_case_ , snake_case_ , snake_case_ )
if clip_guidance_scale > 0:
_a = self.get_clip_image_embeddings(snake_case_ , snake_case_ )
_a = self.get_clip_image_embeddings(snake_case_ , snake_case_ )
_a = slerp(
snake_case_ , snake_case_ , snake_case_ )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_a = content_text_input.input_ids.shape[-1]
_a = self.tokenizer([""] , padding="max_length" , max_length=snake_case_ , return_tensors="pt" )
_a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_a = uncond_embeddings.repeat_interleave(snake_case_ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_a = torch.randn(snake_case_ , generator=snake_case_ , device="cpu" , dtype=snake_case_ ).to(
self.device )
else:
_a = torch.randn(snake_case_ , generator=snake_case_ , device=self.device , dtype=snake_case_ )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_a = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_a = {}
if accepts_eta:
_a = eta
# check if the scheduler accepts generator
_a = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_a = generator
with self.progress_bar(total=snake_case_ ):
for i, t in enumerate(snake_case_ ):
# expand the latents if we are doing classifier free guidance
_a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_a = self.scheduler.scale_model_input(snake_case_ , snake_case_ )
# predict the noise residual
_a = self.unet(snake_case_ , snake_case_ , encoder_hidden_states=snake_case_ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_a , _a = noise_pred.chunk(2 )
_a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_a , _a = self.cond_fn(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_a = 1 / 0.18_215 * latents
_a = self.vae.decode(snake_case_ ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_a = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=snake_case_ , nsfw_content_detected=snake_case_ )
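# The pipeline above relies on two helpers defined elsewhere in this file but not
# shown here: `slerp` and `spherical_dist_loss`. Minimal sketches follow, assuming
# the standard definitions used in CLIP-guided diffusion; the file's own versions
# may differ in detail. Both assume torch tensor inputs.
import numpy as np
import torch
import torch.nn.functional as F

def spherical_dist_loss(x, y):
    # Squared great-circle distance between L2-normalised embeddings.
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)

def slerp(t, v0, v1, dot_threshold=0.9995):
    # Spherical linear interpolation; falls back to plain lerp when the vectors
    # are nearly parallel and arccos becomes numerically unstable.
    device = v0.device
    v0, v1 = v0.cpu().numpy(), v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > dot_threshold:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        theta_t = theta_0 * t
        v2 = (np.sin(theta_0 - theta_t) * v0 + np.sin(theta_t) * v1) / np.sin(theta_0)
    return torch.from_numpy(v2).to(device)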
| 703 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__snake_case : List[Any] = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Union[str, Any]=None, lowerCamelCase__ : Dict=None, lowerCamelCase__ : Optional[int]=None ):
_a = True
while ask_again:
_a = input(lowerCamelCase__ )
try:
if default is not None and len(lowerCamelCase__ ) == 0:
return default
return convert_value(lowerCamelCase__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Dict=[], lowerCamelCase__ : int=None, lowerCamelCase__ : Union[str, Any]=0 ):
_a = BulletMenu(lowerCamelCase__, lowerCamelCase__ )
_a = menu.run(default_choice=lowerCamelCase__ )
return convert_value(lowerCamelCase__ ) if convert_value is not None else result
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _lowercase ( lowerCamelCase__ : Dict ):
_a = int(lowerCamelCase__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _lowercase ( lowerCamelCase__ : List[Any] ):
_a = int(lowerCamelCase__ )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _lowercase ( lowerCamelCase__ : str ):
return {"yes": True, "no": False}[value.lower()]
class A ( argparse.RawDescriptionHelpFormatter ):
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
_a = super()._format_usage(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
_a = usage.replace("<command> [<args>] " , "" )
return usage
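# Self-contained illustration of the index-to-enum conversion pattern implemented
# by the `_convert_*` helpers above (hypothetical enum; the real ones live in
# accelerate.utils.dataclasses): BulletMenu returns the selected index, which is
# mapped onto the enum's value list.
from enum import Enum

class _ExamplePrecision(str, Enum):
    NO = "no"
    FP16 = "fp16"
    BF16 = "bf16"
    FP8 = "fp8"

def _convert_example_precision(value):
    value = int(value)
    return _ExamplePrecision(["no", "fp16", "bf16", "fp8"][value])

assert _convert_example_precision(1) is _ExamplePrecision.FP16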
| 691 | 0 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _lowercase ( ):
_a = argparse.ArgumentParser()
parser.add_argument("--model_ckpt", type=lowerCamelCase__, default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs", type=lowerCamelCase__, default=5 )
parser.add_argument("--batch_size", type=lowerCamelCase__, default=6 )
parser.add_argument("--gradient_accumulation_steps", type=lowerCamelCase__, default=1 )
parser.add_argument("--freeze", type=lowerCamelCase__, default=lowerCamelCase__ )
parser.add_argument("--learning_rate", type=lowerCamelCase__, default=5e-4 )
parser.add_argument("--seed", type=lowerCamelCase__, default=0 )
parser.add_argument("--lr_scheduler_type", type=lowerCamelCase__, default="cosine" )
parser.add_argument("--num_warmup_steps", type=lowerCamelCase__, default=10 )
parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 )
parser.add_argument("--output_dir", type=lowerCamelCase__, default="./results" )
return parser.parse_args()
__snake_case : str = load("accuracy")
def _lowercase ( lowerCamelCase__ : List[str] ):
_a , _a = eval_pred
_a = np.argmax(lowerCamelCase__, axis=1 )
return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ )
class A ( a ):
def __init__( self , snake_case_ ) -> None:
super().__init__()
_a = trainer
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]:
if control.should_evaluate:
_a = deepcopy(snake_case_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def _lowercase ( ):
_a = get_args()
set_seed(args.seed )
_a = load_dataset("codeparrot/codecomplex", split="train" )
_a = dataset.train_test_split(test_size=0.2 )
_a = train_test["test"].train_test_split(test_size=0.5 )
_a = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
_a = AutoTokenizer.from_pretrained(args.model_ckpt )
_a = tokenizer.eos_token
_a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
_a = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_a = False
_a = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(lowerCamelCase__ : Tuple ):
_a = tokenizer(example["src"], truncation=lowerCamelCase__, max_length=1_024 )
        _a = labels.str2int(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_a = train_test_validation.map(
lowerCamelCase__, batched=lowerCamelCase__, remove_columns=train_test_validation["train"].column_names, )
_a = DataCollatorWithPadding(tokenizer=lowerCamelCase__ )
_a = TrainingArguments(
output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
_a = Trainer(
model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, )
print("Training..." )
trainer.add_callback(CustomCallback(lowerCamelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
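    # Quick standalone check (illustrative addition) of the ClassLabel mapping used
    # in `tokenize` above; the label names here are hypothetical stand-ins for the
    # dataset's actual "complexity" values.
    example_labels = ClassLabel(num_classes=3, names=["constant", "linear", "quadratic"])
    assert example_labels.str2int("linear") == 1
    assert example_labels.int2str(2) == "quadratic"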
| 704 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : list[list] ):
_a = current_set.copy()
for row_index, row in enumerate(lowerCamelCase__ ):
_a = row[0]
for column_index, column in enumerate(lowerCamelCase__ ):
if magnitude == 0:
_a = column
continue
_a = column / magnitude
# Subtract to cancel term
_a = current_set[0]
_a = [first_row]
_a = current_set[1::]
for row in current_set:
_a = []
        # If the first term is 0, the row is already in the form we want, so keep it
if row[0] == 0:
final_set.append(lowerCamelCase__ )
continue
for column_index in range(len(lowerCamelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCamelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_a = final_set[0]
_a = []
_a = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_a = simplify(lowerCamelCase__ )
for i in range(len(lowerCamelCase__ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, lowerCamelCase__ )
_a = resultant
return final_set
def _lowercase ( lowerCamelCase__ : list[list] ):
if len(lowerCamelCase__ ) == 0:
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
_a = len(lowerCamelCase__ ) + 1
if any(len(lowerCamelCase__ ) != _length for item in equations ):
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
for row in equations:
if any(not isinstance(lowerCamelCase__, (int, float) ) for column in row ):
raise ValueError("solve_simultaneous() requires lists of integers" )
if len(lowerCamelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
_a = equations.copy()
if any(0 in row for row in data_set ):
_a = data_set.copy()
_a = []
for row_index, row in enumerate(lowerCamelCase__ ):
if 0 not in row:
_a = data_set.pop(lowerCamelCase__ )
break
if not full_row:
raise ValueError("solve_simultaneous() requires at least 1 full equation" )
data_set.insert(0, lowerCamelCase__ )
_a = data_set.copy()
_a = simplify(lowerCamelCase__ )
_a = simplified[::-1]
_a = []
for row in simplified:
_a = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_a = row.copy()[: len(lowerCamelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCamelCase__ ) == 0:
solutions.append(0 )
continue
_a = temp_row[1::]
_a = temp_row[::-1]
for column_index, column in enumerate(lowerCamelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCamelCase__ )
_a = []
for item in solutions:
final.append(float(round(lowerCamelCase__, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case : Tuple = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
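    # A hand-checkable case (assuming solutions come back in variable order):
    # x + y = 3 and x - y = 1 give x = 2, y = 1.
    assert solve_simultaneous([[1, 1, 3], [1, -1, 1]]) == [2.0, 1.0]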
| 691 | 0 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class A ( a ):
def __init__( self , snake_case_=0.01 , snake_case_=1_0_0_0 ) -> int:
_a = p_stop
_a = max_length
def __iter__( self ) -> Optional[Any]:
_a = 0
_a = False
while not stop and count < self.max_length:
yield count
count += 1
_a = random.random() < self.p_stop
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_=False , snake_case_=True ) -> Any:
_a = [
BatchSamplerShard(snake_case_ , 2 , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
for i in range(2 )
]
_a = [list(snake_case_ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(snake_case_ ) for shard in batch_sampler_shards] , [len(snake_case_ ) for e in expected] )
self.assertListEqual(snake_case_ , snake_case_ )
def __lowerCAmelCase ( self ) -> int:
# Check the shards when the dataset is a round multiple of total batch size.
_a = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=snake_case_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
_a = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=snake_case_ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_a = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=snake_case_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
_a = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=snake_case_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
        # Check the shards when the dataset is not a round multiple of batch size but the total
        # number of batches is a multiple of num_processes.
_a = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=snake_case_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
_a = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=snake_case_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
        # Check the shards when the dataset is not a round multiple of batch size and the total
        # number of batches is not a multiple of num_processes.
_a = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=snake_case_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
_a = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=snake_case_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
# Check the shards when the dataset is very small.
_a = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case_ )
_a = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
_a = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case_ )
_a = [[], []]
self.check_batch_sampler_shards(snake_case_ , snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
# Check the shards when the dataset is a round multiple of batch size.
_a = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=snake_case_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ )
_a = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=snake_case_ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ )
# Check the shards when the dataset is not a round multiple of batch size.
_a = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=snake_case_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ )
_a = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=snake_case_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_a = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=snake_case_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ )
_a = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=snake_case_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ )
# Check the shards when the dataset is very small.
_a = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case_ )
_a = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ )
_a = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case_ )
_a = [[], []]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
# Check the shards when the dataset is a round multiple of total batch size.
_a = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=snake_case_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
_a = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=snake_case_ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_a = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=snake_case_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
_a = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=snake_case_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
        # Check the shards when the dataset is not a round multiple of batch size but the total
        # number of batches is a multiple of num_processes.
_a = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=snake_case_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
_a = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=snake_case_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
        # Check the shards when the dataset is not a round multiple of batch size and the total
        # number of batches is not a multiple of num_processes.
_a = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=snake_case_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
_a = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=snake_case_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
# Check the shards when the dataset is very small.
_a = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case_ )
_a = [[[0, 1]], []]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
_a = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case_ )
_a = [[], []]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , even_batches=snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
# Check the shards when the dataset is a round multiple of batch size.
_a = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=snake_case_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
_a = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=snake_case_ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
# Check the shards when the dataset is not a round multiple of batch size.
_a = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=snake_case_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
_a = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=snake_case_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_a = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=snake_case_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
_a = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=snake_case_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
# Check the shards when the dataset is very small.
_a = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case_ )
_a = [[[0, 1]], []]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
_a = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case_ )
_a = [[], []]
self.check_batch_sampler_shards(snake_case_ , snake_case_ , split_batches=snake_case_ , even_batches=snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]]
_a = [BatchSamplerShard(snake_case_ , 2 , snake_case_ , even_batches=snake_case_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 1_0, 1_1]] )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=False , snake_case_=2 , snake_case_=False ) -> Tuple:
random.seed(snake_case_ )
_a = list(snake_case_ )
_a = [
IterableDatasetShard(
snake_case_ , batch_size=snake_case_ , drop_last=snake_case_ , num_processes=snake_case_ , process_index=snake_case_ , split_batches=snake_case_ , )
for i in range(snake_case_ )
]
_a = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(snake_case_ )
iterable_dataset_lists.append(list(snake_case_ ) )
_a = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
_a = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
self.assertTrue(len(snake_case_ ) % shard_batch_size == 0 )
_a = []
for idx in range(0 , len(snake_case_ ) , snake_case_ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(snake_case_ ) < len(snake_case_ ):
reference += reference
self.assertListEqual(snake_case_ , reference[: len(snake_case_ )] )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = 4_2
_a = RandomIterableDataset()
self.check_iterable_dataset_shards(snake_case_ , snake_case_ , batch_size=4 , drop_last=snake_case_ , split_batches=snake_case_ )
self.check_iterable_dataset_shards(snake_case_ , snake_case_ , batch_size=4 , drop_last=snake_case_ , split_batches=snake_case_ )
self.check_iterable_dataset_shards(snake_case_ , snake_case_ , batch_size=4 , drop_last=snake_case_ , split_batches=snake_case_ )
self.check_iterable_dataset_shards(snake_case_ , snake_case_ , batch_size=4 , drop_last=snake_case_ , split_batches=snake_case_ )
# Edge case with a very small dataset
_a = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(snake_case_ , snake_case_ , batch_size=4 , drop_last=snake_case_ , split_batches=snake_case_ )
self.check_iterable_dataset_shards(snake_case_ , snake_case_ , batch_size=4 , drop_last=snake_case_ , split_batches=snake_case_ )
self.check_iterable_dataset_shards(snake_case_ , snake_case_ , batch_size=4 , drop_last=snake_case_ , split_batches=snake_case_ )
self.check_iterable_dataset_shards(snake_case_ , snake_case_ , batch_size=4 , drop_last=snake_case_ , split_batches=snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = BatchSampler(range(1_6 ) , batch_size=4 , drop_last=snake_case_ )
_a = SkipBatchSampler(snake_case_ , 2 )
self.assertListEqual(list(snake_case_ ) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def __lowerCAmelCase ( self ) -> Any:
_a = SkipDataLoader(list(range(1_6 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = DataLoader(list(range(1_6 ) ) , batch_size=4 )
_a = skip_first_batches(snake_case_ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = DataLoaderShard(list(range(1_6 ) ) , batch_size=4 )
for idx, _ in enumerate(snake_case_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(snake_case_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def __lowerCAmelCase ( self ) -> str:
Accelerator()
_a = DataLoaderDispatcher(range(1_6 ) , batch_size=4 )
for idx, _ in enumerate(snake_case_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(snake_case_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
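# Standalone illustration of the round-robin sharding exercised above: with two
# processes, shard 0 receives every other batch of the wrapped sampler. This
# mirrors the first expected shard in the tests at the top of this class.
_bs = BatchSampler(range(24), batch_size=3, drop_last=False)
_shard0 = BatchSamplerShard(_bs, num_processes=2, process_index=0)
assert list(_shard0) == [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]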
| 705 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _lowercase ( lowerCamelCase__ : Optional[int] ): # picklable for multiprocessing
return x.sum()
def _lowercase ( lowerCamelCase__ : int ): # picklable for multiprocessing
return i + 1
@dataclass
class A :
__UpperCAmelCase : int
__UpperCAmelCase : str
class A ( a ):
def __lowerCAmelCase ( self ) -> Tuple:
_a = {}
_a = []
_a = 1
_a = [1, 2]
_a = {"a": 1, "b": 2}
_a = {"a": [1, 2], "b": [3, 4]}
_a = {"a": {"1": 1}, "b": 2}
_a = {"a": 1, "b": 2, "c": 3, "d": 4}
_a = {}
_a = []
_a = 2
_a = [2, 3]
_a = {"a": 2, "b": 3}
_a = {"a": [2, 3], "b": [4, 5]}
_a = {"a": {"1": 2}, "b": 3}
_a = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
_a = 2
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
_a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_a = {"a": 2, "b": 0, "c": 2}
_a = {
"a": np.eye(2 ).astype(snake_case_ ),
"b": np.zeros(3 ).astype(snake_case_ ),
"c": np.ones(2 ).astype(snake_case_ ),
}
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(snake_case_ ): # can't pickle a local lambda
map_nested(lambda snake_case_ : x + 1 , snake_case_ , num_proc=snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = {"a": 1, "b": 2}
_a = {"a": 3, "b": 4}
_a = {"a": 5, "b": 6}
_a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(snake_case_ , snake_case_ , snake_case_ ) ) , snake_case_ )
def __lowerCAmelCase ( self ) -> str:
class A :
__UpperCAmelCase : Optional[int] = """bar"""
_a = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(snake_case_ , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[int] ):
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
_a = {F'''{i}''': i for i in range(lowerCamelCase__ )}
_a = map_nested(lambda lowerCamelCase__ : x + 10, lowerCamelCase__, num_proc=lowerCamelCase__, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A ( a ):
@require_tf
def __lowerCAmelCase ( self ) -> Any:
import tensorflow as tf
from tensorflow.keras import layers
_a = layers.Dense(2 )
def gen_random_output():
_a = tf.random.uniform((1, 3) )
return model(snake_case_ ).numpy()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import torch
def gen_random_output():
_a = torch.nn.Linear(3 , 2 )
_a = torch.rand(1 , 3 )
return model(snake_case_ ).detach().numpy()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __lowerCAmelCase ( self ) -> Optional[int]:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
_a = gen_random_output()
with temp_seed(4_2 ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data", [{}] )
def _lowercase ( lowerCamelCase__ : Any ):
_a = NestedDataStructure(lowerCamelCase__ ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Dict ):
_a = NestedDataStructure(lowerCamelCase__ ).flatten()
assert output == expected_output
def _lowercase ( ):
_a = A(x=1, y="foobar" )
_a = {"x": 1, "y": "foobar"}
assert asdict(lowerCamelCase__ ) == expected_output
_a = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]}
_a = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(lowerCamelCase__ ) == expected_output
with pytest.raises(lowerCamelCase__ ):
asdict([1, A(x=10, y="foo" )] )
def _lowercase ( lowerCamelCase__ : str ):
return text.split()
def _lowercase ( lowerCamelCase__ : List[Any] ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _lowercase ( ):
with Pool(2 ) as pool:
_a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(lowerCamelCase__ ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
_a = list(iflatmap_unordered(lowerCamelCase__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(lowerCamelCase__ ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
_a = []
for yield_time, content in iflatmap_unordered(
lowerCamelCase__, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(lowerCamelCase__ )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(lowerCamelCase__ ) == 4
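# Minimal standalone use of map_nested outside the test harness, mirroring the
# assertions above: the function is applied to every leaf of the nested structure.
assert map_nested(lambda x: x + 1, {"a": [1, 2], "b": [3, 4]}) == {"a": [2, 3], "b": [4, 5]}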
| 691 | 0 |
'''simple docstring'''
import os
from math import logaa
def _lowercase ( lowerCamelCase__ : str = "base_exp.txt" ):
_a = 0
_a = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(lowerCamelCase__ ), lowerCamelCase__ ) ) ):
_a , _a = list(map(lowerCamelCase__, line.split("," ) ) )
if x * logaa(lowerCamelCase__ ) > largest:
_a = x * logaa(lowerCamelCase__ )
_a = i + 1
return result
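# Worked check of the comparison trick above: instead of computing b**e directly,
# compare e * log10(b) (`logaa` is this dump's masked spelling of math.log10).
# For 2**10 vs 10**3: 10 * log10(2) ≈ 3.0103 > 3 * log10(10) = 3.0, and indeed 1024 > 1000.
assert 10 * logaa(2) > 3 * logaa(10)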
if __name__ == "__main__":
print(solution()) | 706 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(a )
class A ( a ):
__UpperCAmelCase : Dict = """rag"""
__UpperCAmelCase : Dict = True
def __init__( self , snake_case_=None , snake_case_=True , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=" / " , snake_case_=" // " , snake_case_=5 , snake_case_=3_0_0 , snake_case_=7_6_8 , snake_case_=8 , snake_case_="wiki_dpr" , snake_case_="train" , snake_case_="compressed" , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=0.0 , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[Any]:
super().__init__(
bos_token_id=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , prefix=snake_case_ , vocab_size=snake_case_ , **snake_case_ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_a = kwargs.pop("question_encoder" )
_a = question_encoder_config.pop("model_type" )
_a = kwargs.pop("generator" )
_a = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = reduce_loss
_a = label_smoothing
_a = exclude_bos_score
_a = do_marginalize
_a = title_sep
_a = doc_sep
_a = n_docs
_a = max_combined_length
_a = dataset
_a = dataset_split
_a = index_name
_a = retrieval_vector_size
_a = retrieval_batch_size
_a = passages_path
_a = index_path
_a = use_dummy_dataset
_a = output_retrieved
_a = do_deduplication
_a = use_cache
if self.forced_eos_token_id is None:
_a = getattr(self.generator , "forced_eos_token_id" , snake_case_ )
@classmethod
def __lowerCAmelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = copy.deepcopy(self.__dict__ )
_a = self.question_encoder.to_dict()
_a = self.generator.to_dict()
_a = self.__class__.model_type
return output
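# Hypothetical construction sketch (the class above is RagConfig; class and method
# names are masked in this dump). Both sub-configs are plain dicts carrying a
# "model_type" key that AutoConfig.for_model can resolve.
rag_config = RagConfig(
    question_encoder={"model_type": "dpr"},
    generator={"model_type": "bart"},
    n_docs=5,
)
assert rag_config.n_docs == 5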
| 691 | 0 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class A ( a ):
def __lt__( self , snake_case_ ) -> Optional[int]:
return self[-1] < other[-1]
def __eq__( self , snake_case_ ) -> Optional[Any]:
return self[-1] == other[-1]
def _lowercase ( lowerCamelCase__ : list ):
_a = []
# sort into stacks
for element in collection:
_a = Stack([element] )
_a = bisect_left(lowerCamelCase__, lowerCamelCase__ )
if i != len(lowerCamelCase__ ):
stacks[i].append(lowerCamelCase__ )
else:
stacks.append(lowerCamelCase__ )
# use a heap-based merge to merge stack efficiently
_a = merge(*(reversed(lowerCamelCase__ ) for stack in stacks) )
return collection
if __name__ == "__main__":
__snake_case : Any = input("Enter numbers separated by a comma:\n").strip()
__snake_case : int = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
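    # Hand-checkable case (assuming the masked assignment in the sort above stands
    # for the original in-place merge, `collection[:] = merge(...)`):
    # [5, 1, 4, 2, 3] builds stacks [5, 1], [4, 2], [3] and merges to sorted order.
    assert patience_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]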
| 707 |
'''simple docstring'''
class A :
def __init__( self ) -> List[str]:
_a = 0
_a = 0
_a = {}
def __lowerCAmelCase ( self , snake_case_ ) -> int:
if vertex not in self.adjacency:
_a = {}
self.num_vertices += 1
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
self.add_vertex(snake_case_ )
self.add_vertex(snake_case_ )
if head == tail:
return
_a = weight
_a = weight
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for i in range(len(snake_case_ ) ):
_a = list(edges[i] )
edges.sort(key=lambda snake_case_ : e[2] )
for i in range(len(snake_case_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_a = edges[i][2] + 1
for edge in edges:
_a , _a , _a = edge
_a = weight
_a = weight
def __str__( self ) -> Optional[int]:
_a = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_a = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __lowerCAmelCase ( self ) -> Any:
return self.adjacency.keys()
@staticmethod
def __lowerCAmelCase ( snake_case_=None , snake_case_=None ) -> Any:
_a = Graph()
if vertices is None:
_a = []
if edges is None:
_a = []
for vertex in vertices:
g.add_vertex(snake_case_ )
for edge in edges:
g.add_edge(*snake_case_ )
return g
class A :
def __init__( self ) -> Optional[int]:
_a = {}
_a = {}
def __len__( self ) -> List[Any]:
return len(self.parent )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
if item in self.parent:
return self.find(snake_case_ )
_a = item
_a = 0
return item
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[Any]:
if item not in self.parent:
return self.make_set(snake_case_ )
if item != self.parent[item]:
_a = self.find(self.parent[item] )
return self.parent[item]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[int]:
_a = self.find(snake_case_ )
_a = self.find(snake_case_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_a = roota
return roota
if self.rank[roota] < self.rank[roota]:
_a = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_a = roota
return roota
return None
@staticmethod
def __lowerCAmelCase ( snake_case_ ) -> Tuple:
_a = graph.num_vertices
_a = Graph.UnionFind()
_a = []
while num_components > 1:
_a = {}
for vertex in graph.get_vertices():
_a = -1
_a = graph.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for edge in edges:
_a , _a , _a = edge
_a = union_find.find(snake_case_ )
_a = union_find.find(snake_case_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_a , _a , _a = cheap_edge[vertex]
if union_find.find(snake_case_ ) != union_find.find(snake_case_ ):
union_find.union(snake_case_ , snake_case_ )
mst_edges.append(cheap_edge[vertex] )
_a = num_components - 1
_a = Graph.build(edges=snake_case_ )
return mst
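# Self-contained mini version of the union-find that the MST routine above relies
# on (union by rank with path compression), with a hand-checkable example. This is
# an illustrative re-implementation, not the class defined above.
_parent: dict = {}
_rank: dict = {}

def _uf_find(x):
    _parent.setdefault(x, x)
    _rank.setdefault(x, 0)
    if _parent[x] != x:
        _parent[x] = _uf_find(_parent[x])  # path compression
    return _parent[x]

def _uf_union(a, b):
    ra, rb = _uf_find(a), _uf_find(b)
    if ra == rb:
        return
    if _rank[ra] < _rank[rb]:
        ra, rb = rb, ra
    _parent[rb] = ra  # attach the shallower tree under the deeper one
    if _rank[ra] == _rank[rb]:
        _rank[ra] += 1

_uf_union(1, 2)
_uf_union(3, 4)
_uf_union(2, 3)
assert _uf_find(1) == _uf_find(4)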
| 691 | 0 |
def _lowercase ( lowerCamelCase__ : list[int], lowerCamelCase__ : list[int] ):
# Check if the input is valid
if not len(lowerCamelCase__ ) == len(lowerCamelCase__ ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
_a , _a , _a = equationa
_a , _a , _a = equationa
# Calculate the determinants of the matrices
_a = aa * ba - aa * ba
_a = ca * ba - ca * ba
_a = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
            # Trivial solution (homogeneous system: both constants are zero, so x = y = 0)
return (0.0, 0.0)
else:
_a = determinant_x / determinant
_a = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
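# Worked example for the solver above (`_lowercase` is its masked name in this dump):
# x + 2y = 5 and 3x + 4y = 11.
# determinant = 1*4 - 3*2 = -2, determinant_x = 5*4 - 11*2 = -2,
# determinant_y = 1*11 - 3*5 = -4, so x = -2/-2 = 1.0 and y = -4/-2 = 2.0.
assert _lowercase([1, 2, 5], [3, 4, 11]) == (1.0, 2.0)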
| 708 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case : Tuple = "\\n Text data.\n Second line of data."
__snake_case : int = "file"
@pytest.fixture(scope="session" )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
_a = bytes(lowerCamelCase__, "utf-8" )
with zstd.open(lowerCamelCase__, "wb" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture
def _lowercase ( lowerCamelCase__ : int ):
with open(os.path.join(tmpfs.local_root_dir, lowerCamelCase__ ), "w" ) as f:
f.write(lowerCamelCase__ )
return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] )
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict ):
_a = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_a = input_paths[compression_format]
_a = tmp_path / "cache"
_a = DownloadConfig(cache_dir=lowerCamelCase__, extract_compressed_file=lowerCamelCase__ )
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
with open(lowerCamelCase__ ) as f:
_a = f.read()
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False] )
@pytest.mark.parametrize("default_cache_dir", [True, False] )
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ):
_a = "custom_cache"
_a = "custom_extracted_dir"
_a = tmp_path / "custom_extracted_path"
if default_extracted:
_a = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", lowerCamelCase__ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(lowerCamelCase__ ) )
_a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_a = xz_file
_a = (
DownloadConfig(extract_compressed_file=lowerCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=lowerCamelCase__ )
)
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
assert Path(lowerCamelCase__ ).parent.parts[-2:] == expected
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
# absolute path
_a = str(Path(lowerCamelCase__ ).resolve() )
assert cached_path(lowerCamelCase__ ) == text_file
# relative path
_a = str(Path(lowerCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowerCamelCase__ ) == text_file
def _lowercase ( lowerCamelCase__ : Dict ):
# absolute path
_a = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
# relative path
_a = "./__missing_file__.txt"
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( ):
with pytest.raises(lowerCamelCase__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
http_get("https://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
ftp_get("ftp://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
fsspec_get("s3://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
fsspec_head("s3://huggingface.co" )
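# Hedged, self-contained illustration of the offline-guard pattern shared by
# the three tests above; `_OFFLINE` and `_fetch` are stand-ins for this sketch,
# not datasets APIs.
_OFFLINE = True

def _fetch(url: str) -> bytes:
    if _OFFLINE:
        raise ConnectionError(F'''offline mode is enabled, cannot reach {url}''')
    raise NotImplementedError("network access is not modelled in this sketch")

try:
    _fetch("https://huggingface.co")
except ConnectionError:
    pass  # expected: every network helper raises while the flag is set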
| 691 | 0 |
from __future__ import annotations
__snake_case : List[str] = list[list[int]]
# assigning initial values to the grid
__snake_case : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__snake_case : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _lowercase ( lowerCamelCase__ : Matrix, lowerCamelCase__ : int, lowerCamelCase__ : int, lowerCamelCase__ : int ):
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def _lowercase ( lowerCamelCase__ : Matrix ):
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _lowercase ( lowerCamelCase__ : Matrix ):
if location := find_empty_location(lowerCamelCase__ ):
_a , _a = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1, 10 ):
if is_safe(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
_a = digit
if sudoku(lowerCamelCase__ ) is not None:
return grid
_a = 0
return None
def _lowercase ( lowerCamelCase__ : Matrix ):
for row in grid:
for cell in row:
print(lowerCamelCase__, end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
__snake_case : Dict = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 709 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__snake_case : Union[str, Any] = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def _lowercase ( lowerCamelCase__ : List[Any] ):
_a = {}
state_dict.pop("pixel_mean", lowerCamelCase__ )
state_dict.pop("pixel_std", lowerCamelCase__ )
_a = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_a = key.replace(lowerCamelCase__, lowerCamelCase__ )
if re.match(lowerCamelCase__, lowerCamelCase__ ):
_a = int(re.match(lowerCamelCase__, lowerCamelCase__ ).group(2 ) )
if layer_nb == 0:
_a = key.replace("layers.0", "proj_in" )
elif layer_nb == 1:
_a = key.replace("layers.1", "layers.0" )
elif layer_nb == 2:
_a = key.replace("layers.2", "proj_out" )
_a = value
_a = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
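# Quick standalone check of the renumbering rule implemented above: inside
# output_hypernetworks_mlps, sub-layer indices 0/1/2 are renamed to
# proj_in/layers.0/proj_out respectively.
_demo_key = "mask_decoder.output_hypernetworks_mlps.3.layers.2.weight"
assert int(re.match(R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*", _demo_key).group(2)) == 2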
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, lowerCamelCase__ : str="ybelkada/segment-anything" ):
_a = hf_hub_download(lowerCamelCase__, F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
_a = SamConfig()
elif "sam_vit_l" in model_name:
_a = SamVisionConfig(
hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
_a = SamConfig(
vision_config=lowerCamelCase__, )
elif "sam_vit_h" in model_name:
_a = SamVisionConfig(
hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
_a = SamConfig(
vision_config=lowerCamelCase__, )
_a = torch.load(lowerCamelCase__, map_location="cpu" )
_a = replace_keys(lowerCamelCase__ )
_a = SamImageProcessor()
_a = SamProcessor(image_processor=lowerCamelCase__ )
_a = SamModel(lowerCamelCase__ )
hf_model.load_state_dict(lowerCamelCase__ )
_a = hf_model.to("cuda" )
_a = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
_a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw ).convert("RGB" )
_a = [[[400, 650]]]
_a = [[1]]
_a = processor(images=np.array(lowerCamelCase__ ), return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
_a = processor(
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
_a = ((75, 275, 1_725, 850),)
_a = processor(images=np.array(lowerCamelCase__ ), input_boxes=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
_a = [[[400, 650], [800, 650]]]
_a = [[1, 1]]
_a = processor(
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
__snake_case : Optional[Any] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
__snake_case : str = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 691 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class A :
__UpperCAmelCase : torch.Tensor # [batch_size x 3]
__UpperCAmelCase : torch.Tensor # [batch_size x 3]
__UpperCAmelCase : torch.Tensor # [batch_size x 3]
__UpperCAmelCase : torch.Tensor # [batch_size x 3]
__UpperCAmelCase : int
__UpperCAmelCase : int
__UpperCAmelCase : float
__UpperCAmelCase : float
__UpperCAmelCase : Tuple[int]
def __lowerCAmelCase ( self ) -> Dict:
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __lowerCAmelCase ( self ) -> int:
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def __lowerCAmelCase ( self ) -> Any:
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def __lowerCAmelCase ( self ) -> torch.Tensor:
_a = torch.arange(self.height * self.width )
_a = torch.stack(
[
pixel_indices % self.width,
torch.div(snake_case_ , self.width , rounding_mode="trunc" ),
] , axis=1 , )
return coords
@property
def __lowerCAmelCase ( self ) -> Any:
_a , *_a = self.shape
_a = int(np.prod(snake_case_ ) )
_a = self.get_image_coords()
_a = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_a = self.get_camera_rays(snake_case_ )
_a = rays.view(snake_case_ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __lowerCAmelCase ( self , snake_case_ ) -> torch.Tensor:
_a , *_a , _a = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_a = coords.view(snake_case_ , -1 , 2 )
_a = self.resolution()
_a = self.fov()
_a = (flat.float() / (res - 1)) * 2 - 1
_a = fracs * torch.tan(fov / 2 )
_a = fracs.view(snake_case_ , -1 , 2 )
_a = (
self.z.view(snake_case_ , 1 , 3 )
+ self.x.view(snake_case_ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(snake_case_ , 1 , 3 ) * fracs[:, :, 1:]
)
_a = directions / directions.norm(dim=-1 , keepdim=snake_case_ )
_a = torch.stack(
[
torch.broadcast_to(self.origin.view(snake_case_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(snake_case_ , *snake_case_ , 2 , 3 )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> "DifferentiableProjectiveCamera":
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=snake_case_ , height=snake_case_ , x_fov=self.x_fov , y_fov=self.y_fov , )
def _lowercase ( lowerCamelCase__ : int ):
_a = []
_a = []
_a = []
_a = []
for theta in np.linspace(0, 2 * np.pi, num=20 ):
_a = np.array([np.sin(lowerCamelCase__ ), np.cos(lowerCamelCase__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_a = -z * 4
_a = np.array([np.cos(lowerCamelCase__ ), -np.sin(lowerCamelCase__ ), 0.0] )
_a = np.cross(lowerCamelCase__, lowerCamelCase__ )
origins.append(lowerCamelCase__ )
xs.append(lowerCamelCase__ )
ys.append(lowerCamelCase__ )
zs.append(lowerCamelCase__ )
return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(lowerCamelCase__, axis=0 ) ).float(),
        x=torch.from_numpy(np.stack(lowerCamelCase__, axis=0 ) ).float(),
        y=torch.from_numpy(np.stack(lowerCamelCase__, axis=0 ) ).float(),
        z=torch.from_numpy(np.stack(lowerCamelCase__, axis=0 ) ).float(),
        width=lowerCamelCase__,
        height=lowerCamelCase__,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(lowerCamelCase__ )),
    )
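# Standalone numeric sketch of one pan-camera frame from the loop above (an
# assumed reading of the obfuscated names): z looks toward the origin, x and y
# span the image plane, and the camera sits 4 units back along -z.
_theta = 0.0
_z = np.array([np.sin(_theta), np.cos(_theta), -0.5])
_z /= np.sqrt(np.sum(_z**2))
_origin = -_z * 4
_x = np.array([np.cos(_theta), -np.sin(_theta), 0.0])
_y = np.cross(_z, _x)
assert abs(np.dot(_x, _z)) < 1e-9 and abs(np.dot(_x, _y)) < 1e-9  # orthogonal frame
assert abs(np.linalg.norm(_origin) - 4.0) < 1e-9  # camera distance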
| 710 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Dict=0.9_99, lowerCamelCase__ : Union[str, Any]="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCamelCase__ : List[Any] ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCamelCase__ : Union[str, Any] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
_a = []
for i in range(lowerCamelCase__ ):
_a = i / num_diffusion_timesteps
_a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCamelCase__ ) / alpha_bar_fn(lowerCamelCase__ ), lowerCamelCase__ ) )
return torch.tensor(lowerCamelCase__, dtype=torch.floataa )
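# Readable, standalone restatement of the "cosine" branch of the helper above
# (same formula with de-obfuscated names; values match by construction).
def _cosine_betas_demo(num_steps, max_beta=0.999):
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_steps):
        t1, t2 = i / num_steps, (i + 1) / num_steps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)

assert _cosine_betas_demo(10).shape == (10,)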
class A ( a , a ):
__UpperCAmelCase : int = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Optional[int] = 2
@register_to_config
def __init__( self , snake_case_ = 1_0_0_0 , snake_case_ = 0.00_085 , snake_case_ = 0.012 , snake_case_ = "linear" , snake_case_ = None , snake_case_ = "epsilon" , snake_case_ = "linspace" , snake_case_ = 0 , ) -> Optional[int]:
if trained_betas is not None:
_a = torch.tensor(snake_case_ , dtype=torch.floataa )
elif beta_schedule == "linear":
_a = torch.linspace(snake_case_ , snake_case_ , snake_case_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a = betas_for_alpha_bar(snake_case_ )
else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_a = 1.0 - self.betas
_a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(snake_case_ , snake_case_ , snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Dict:
if schedule_timesteps is None:
_a = self.timesteps
_a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a = 1 if len(snake_case_ ) > 1 else 0
else:
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
_a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowerCAmelCase ( self ) -> Dict:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
_a = self.index_for_timestep(snake_case_ )
if self.state_in_first_order:
_a = self.sigmas[step_index]
else:
_a = self.sigmas_interpol[step_index]
_a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , ) -> Union[str, Any]:
_a = num_inference_steps
_a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a = np.linspace(0 , num_train_timesteps - 1 , snake_case_ , dtype=snake_case_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(0 , snake_case_ ) * step_ratio).round()[::-1].copy().astype(snake_case_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(snake_case_ , 0 , -step_ratio )).round().copy().astype(snake_case_ )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
_a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a = torch.from_numpy(np.log(snake_case_ ) ).to(snake_case_ )
_a = np.interp(snake_case_ , np.arange(0 , len(snake_case_ ) ) , snake_case_ )
_a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a = torch.from_numpy(snake_case_ ).to(device=snake_case_ )
# interpolate sigmas
_a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
_a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(snake_case_ ).startswith("mps" ):
# mps does not support float64
_a = torch.from_numpy(snake_case_ ).to(snake_case_ , dtype=torch.floataa )
else:
_a = torch.from_numpy(snake_case_ ).to(snake_case_ )
# interpolate timesteps
_a = self.sigma_to_t(snake_case_ ).to(snake_case_ , dtype=timesteps.dtype )
_a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_a = torch.cat([timesteps[:1], interleaved_timesteps] )
_a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a = defaultdict(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
# get log sigma
_a = sigma.log()
# get distribution
_a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_a = low_idx + 1
_a = self.log_sigmas[low_idx]
_a = self.log_sigmas[high_idx]
# interpolate sigmas
_a = (low - log_sigma) / (low - high)
_a = w.clamp(0 , 1 )
# transform interpolation to time range
_a = (1 - w) * low_idx + w * high_idx
_a = t.view(sigma.shape )
return t
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return self.sample is None
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ) -> Union[SchedulerOutput, Tuple]:
_a = self.index_for_timestep(snake_case_ )
# advance index counter by 1
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a = self.sigmas[step_index]
_a = self.sigmas_interpol[step_index + 1]
_a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_a = self.sigmas[step_index - 1]
_a = self.sigmas_interpol[step_index]
_a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a = 0
_a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a = sigma_interpol - sigma_hat
# store for 2nd order step
_a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_a = sigma_next - sigma_hat
_a = self.sample
_a = None
_a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(snake_case_ ):
# mps does not support float64
_a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_a = self.timesteps.to(original_samples.device )
_a = timesteps.to(original_samples.device )
_a = [self.index_for_timestep(snake_case_ , snake_case_ ) for t in timesteps]
_a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a = sigma.unsqueeze(-1 )
_a = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> str:
return self.config.num_train_timesteps
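# Standalone numeric sketch of the sigma -> t inversion performed by the
# scheduler above: interpolate linearly in log-sigma space to obtain a
# fractional timestep index (values here are illustrative only).
_log_sigmas = torch.log(torch.tensor([10.0, 1.0, 0.1]))  # grid at t = 0, 1, 2
_sigma = torch.tensor(3.0)  # lies between grid points 0 and 1
_w = (_log_sigmas[0] - _sigma.log()) / (_log_sigmas[0] - _log_sigmas[1])
_t = (1 - _w) * 0 + _w * 1
assert 0.0 < _t.item() < 1.0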
| 691 | 0 |
import math
import unittest
def _lowercase ( lowerCamelCase__ : int ):
assert isinstance(lowerCamelCase__, lowerCamelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(lowerCamelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
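# Why checking 6k +/- 1 suffices: write n = 6k + r with r in {0, ..., 5};
# r in {0, 2, 4} makes n even and r == 3 makes it divisible by 3, so every
# prime above 3 has r in {1, 5}. Quick exhaustive sanity check:
assert all(p % 6 in (1, 5) for p in (5, 7, 11, 13, 17, 19, 23, 29))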
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> int:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(1_1 ) )
self.assertTrue(is_prime(1_3 ) )
self.assertTrue(is_prime(1_7 ) )
self.assertTrue(is_prime(1_9 ) )
self.assertTrue(is_prime(2_3 ) )
self.assertTrue(is_prime(2_9 ) )
def __lowerCAmelCase ( self ) -> List[str]:
with self.assertRaises(snake_case_ ):
is_prime(-1_9 )
self.assertFalse(
is_prime(0 ) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 711 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : list[int], lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int, lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
# Base Case
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index], lowerCamelCase__, lowerCamelCase__ ):
# Color current vertex
_a = i
# Validate coloring
if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, index + 1 ):
return True
# Backtrack
_a = -1
return False
def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int ):
_a = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, 0 ):
return colored_vertices
return []
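# Standalone, runnable restatement of the backtracking colorer above with
# readable names, checked on a 4-cycle that needs exactly two colors.
def _color_demo(graph, max_colors):
    n = len(graph)
    colors = [-1] * n

    def valid(vertex, color):
        return all(not (graph[vertex][u] and colors[u] == color) for u in range(n))

    def solve(vertex):
        if vertex == n:
            return True
        for color in range(max_colors):
            if valid(vertex, color):
                colors[vertex] = color
                if solve(vertex + 1):
                    return True
                colors[vertex] = -1  # backtrack
        return False

    return colors if solve(0) else []

assert _color_demo([[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]], 2) == [0, 1, 0, 1]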
| 691 | 0 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
__snake_case : Dict = {"target_lang": "fi", "source_lang": "en"}
__snake_case : Any = ">>zh<<"
__snake_case : str = "Helsinki-NLP/"
if is_torch_available():
__snake_case : Any = "pt"
elif is_tf_available():
__snake_case : Dict = "tf"
else:
__snake_case : Dict = "jax"
@require_sentencepiece
class A ( a , unittest.TestCase ):
__UpperCAmelCase : Dict = MarianTokenizer
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Any = True
def __lowerCAmelCase ( self ) -> Optional[int]:
super().setUp()
_a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab"] )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["source_spm"] )
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["target_spm"] )
_a = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self , **snake_case_ ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Any:
return (
"This is a test",
"This is a test",
)
def __lowerCAmelCase ( self ) -> str:
_a = "</s>"
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(snake_case_ ) , 9 )
def __lowerCAmelCase ( self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
_a = en_de_tokenizer(["I am a small frog"] , return_tensors=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
_a = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
self.assertListEqual(snake_case_ , batch.input_ids[0] )
_a = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(snake_case_ )
_a = [x.name for x in Path(snake_case_ ).glob("*" )]
self.assertIn("source.spm" , snake_case_ )
MarianTokenizer.from_pretrained(snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.get_tokenizer()
_a = tok(
["I am a small frog" * 1_0_0_0, "I am a small frog"] , padding=snake_case_ , truncation=snake_case_ , return_tensors=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2) )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = self.get_tokenizer()
_a = tok(["I am a tiny frog", "I am a small frog"] , padding=snake_case_ , return_tensors=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) )
@slow
def __lowerCAmelCase ( self ) -> Dict:
# fmt: off
_a = {"input_ids": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
def __lowerCAmelCase ( self ) -> str:
_a = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
_a = "Tämä on testi"
_a = "This is a test"
_a = [7_6, 7, 2_0_4_7, 2]
_a = [6_9, 1_2, 1_1, 9_4_0, 2]
_a = tokenizer(snake_case_ ).input_ids
self.assertListEqual(snake_case_ , snake_case_ )
_a = tokenizer(text_target=snake_case_ ).input_ids
self.assertListEqual(snake_case_ , snake_case_ )
_a = tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
| 712 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class A :
def __init__( self , snake_case_ ) -> Optional[int]:
_a = str(id_ )
_a = None
_a = None
_a = []
_a = {} # {vertex:distance}
def __lt__( self , snake_case_ ) -> Optional[Any]:
return self.key < other.key
def __repr__( self ) -> Union[str, Any]:
return self.id
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
self.neighbors.append(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Any:
_a = weight
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : str ):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1], lowerCamelCase__ )
graph[b - 1].add_edge(graph[a - 1], lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
_a = []
for u in graph:
_a = math.inf
_a = None
_a = 0
_a = graph[:]
while q:
_a = min(lowerCamelCase__ )
q.remove(lowerCamelCase__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_a = u
_a = u.edges[v.id]
for i in range(1, len(lowerCamelCase__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
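# Standalone mini-example of the list-based Prim routine above on a weighted
# triangle, using plain dicts in place of the Vertex class.
def _prim_demo(adj, start):
    key = {v: math.inf for v in adj}
    parent = {v: None for v in adj}
    key[start] = 0
    q = set(adj)
    edges = []
    while q:
        u = min(q, key=key.get)
        q.remove(u)
        if parent[u] is not None:
            edges.append((parent[u], u))
        for v, w in adj[u].items():
            if v in q and w < key[v]:
                parent[v], key[v] = u, w
    return edges

_triangle = {"a": {"b": 1, "c": 3}, "b": {"a": 1, "c": 2}, "c": {"a": 3, "b": 2}}
assert sorted(_prim_demo(_triangle, "a")) == [("a", "b"), ("b", "c")]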
def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
for u in graph:
_a = math.inf
_a = None
_a = 0
_a = list(lowerCamelCase__ )
hq.heapify(lowerCamelCase__ )
while h:
_a = hq.heappop(lowerCamelCase__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_a = u
_a = u.edges[v.id]
hq.heapify(lowerCamelCase__ )
for i in range(1, len(lowerCamelCase__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _lowercase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 691 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case : int = logging.get_logger(__name__)
__snake_case : Optional[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__snake_case : List[Any] = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__snake_case : str = {"facebook/blenderbot-3B": 128}
class A ( a ):
__UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
__UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : str = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[str] = BlenderbotTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[Any]:
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = getattr(snake_case_ , pre_tok_state.pop("type" ) )
_a = add_prefix_space
_a = pre_tok_class(**snake_case_ )
_a = add_prefix_space
_a = "post_processor"
_a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state["sep"] )
if "cls" in state:
_a = tuple(state["cls"] )
_a = False
if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get("trim_offsets" , snake_case_ ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(snake_case_ , state.pop("type" ) )
_a = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
_a = value
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Dict:
return token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , snake_case_ ) -> List[int]:
_a = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done within Blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(snake_case_ )
_a = " ".join(snake_case_ )
_a = self.encode(snake_case_ )
if len(snake_case_ ) > self.model_max_length:
_a = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
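# Hedged standalone sketch of the conversation flattening implemented above:
# user turns get a leading space, all turns are joined with a single space,
# and the sequence is truncated from the left; `str.split` stands in for the
# tokenizer's real `encode`.
def _flatten_conversation_demo(turns, max_tokens=8):
    parts = [(" " + text) if is_user else text for is_user, text in turns]
    tokens = " ".join(parts).split()  # stand-in for self.encode(...)
    return tokens[-max_tokens:]  # keep only the most recent tokens

assert _flatten_conversation_demo([(True, "hi there"), (False, "hello!")]) == ["hi", "there", "hello!"]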
| 713 |
'''simple docstring'''
__snake_case : List[str] = "Tobias Carryer"
from time import time
class A :
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=int(time() ) ) -> str: # noqa: B008
_a = multiplier
_a = increment
_a = modulo
_a = seed
def __lowerCAmelCase ( self ) -> str:
_a = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
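# Standalone check of the recurrence x' = (multiplier * x + increment) % modulo
# with the constants used below and a fixed seed, so the result is reproducible.
_mult, _inc, _mod = 166_4525, 10_1390_4223, 2 << 31
assert 0 <= (_mult * 42 + _inc) % _mod < _mod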
if __name__ == "__main__":
# Show the LCG in action.
__snake_case : Union[str, Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
| 691 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A ( unittest.TestCase ):
__UpperCAmelCase : List[str] = ViTImageProcessor if is_vision_available() else None
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = (3, 3_2, 1_2_8)
_a = tempfile.mkdtemp()
# fmt: off
_a = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case_ ) + "\n" )
_a = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 3_2, "width": 1_2_8},
}
_a = os.path.join(self.tmpdirname , snake_case_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(snake_case_ , snake_case_ )
def __lowerCAmelCase ( self , **snake_case_ ) -> Optional[Any]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , **snake_case_ ) -> Optional[int]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )
_a = Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) )
return image_input
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.get_tokenizer()
_a = self.get_image_processor()
_a = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
_a = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.get_tokenizer()
_a = self.get_image_processor()
_a = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
_a = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_a = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
_a = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_a = self.prepare_image_inputs()
_a = image_processor(snake_case_ , return_tensors="np" )
_a = processor(images=snake_case_ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_a = "test"
_a = processor(text=snake_case_ )
_a = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_a = "test"
_a = self.prepare_image_inputs()
_a = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_a = processor.char_decode(snake_case_ )
_a = tokenizer.batch_decode(snake_case_ )
_a = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(snake_case_ , snake_case_ )
def __lowerCAmelCase ( self ) -> str:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_a = None
_a = self.prepare_image_inputs()
_a = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
_a = torch.randn(1 , 2_7 , 3_8 )
_a = torch.randn(1 , 2_7 , 5_0_2_5_7 )
_a = torch.randn(1 , 2_7 , 3_0_5_2_2 )
_a = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 714 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case : List[str] = logging.get_logger("transformers.models.encodec")
__snake_case : Tuple = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
__snake_case : int = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
__snake_case : Optional[int] = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
__snake_case : Tuple = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
__snake_case : int = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
__snake_case : Union[str, Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case : List[str] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__snake_case : Tuple = []
__snake_case : Optional[int] = []
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : List[Any] ):
for attribute in key.split("." ):
_a = getattr(lowerCamelCase__, lowerCamelCase__ )
if weight_type is not None:
_a = getattr(lowerCamelCase__, lowerCamelCase__ ).shape
else:
_a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "running_mean":
_a = value
elif weight_type == "running_var":
_a = value
elif weight_type == "num_batches_tracked":
_a = value
elif weight_type == "weight_ih_l0":
_a = value
elif weight_type == "weight_hh_l0":
_a = value
elif weight_type == "bias_ih_l0":
_a = value
elif weight_type == "bias_hh_l0":
_a = value
elif weight_type == "weight_ih_l1":
_a = value
elif weight_type == "weight_hh_l1":
_a = value
elif weight_type == "bias_ih_l1":
_a = value
elif weight_type == "bias_hh_l1":
_a = value
else:
_a = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : str ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
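# Readable standalone restatement of the ignore rules above, which also gives
# the `should_ignore` call further below a working definition: a trailing ".*"
# is a prefix match, ".*." requires both prefix and suffix, and any other key
# is a plain substring match.
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False

assert should_ignore("encoder.model.0.conv.bias", ["encoder.model.*"])
assert should_ignore("decoder.layers.3.norm.weight", ["decoder.*.norm"])
assert not should_ignore("quantizer.embed", ["decoder.*"])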
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : int ):
_a = []
if model_name == "encodec_24khz" or "encodec_32khz":
_a = MAPPING_24K
elif model_name == "encodec_48khz":
_a = MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__, lowerCamelCase__ ):
logger.info(F'''{name} was ignored''' )
continue
_a = False
for key, mapped_key in MAPPING.items():
if "*" in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
_a = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("embed" ) and name.endswith("embed_avg" ):
continue
_a = True
if "*" in mapped_key:
_a = name.split(lowerCamelCase__ )[0].split("." )[-2]
_a = mapped_key.replace("*", lowerCamelCase__ )
if "weight_g" in name:
_a = "weight_g"
elif "weight_v" in name:
_a = "weight_v"
elif "weight_ih_l0" in name:
_a = "weight_ih_l0"
elif "weight_hh_l0" in name:
_a = "weight_hh_l0"
elif "bias_ih_l0" in name:
_a = "bias_ih_l0"
elif "bias_hh_l0" in name:
_a = "bias_hh_l0"
elif "weight_ih_l1" in name:
_a = "weight_ih_l1"
elif "weight_hh_l1" in name:
_a = "weight_hh_l1"
elif "bias_ih_l1" in name:
_a = "bias_ih_l1"
elif "bias_hh_l1" in name:
_a = "bias_hh_l1"
elif "bias" in name:
_a = "bias"
elif "weight" in name:
_a = "weight"
elif "running_mean" in name:
_a = "running_mean"
elif "running_var" in name:
_a = "running_var"
elif "num_batches_tracked" in name:
_a = "num_batches_tracked"
else:
_a = None
set_recursively(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : str=None, lowerCamelCase__ : List[Any]=None, ):
if config_path is not None:
_a = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
_a = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
_a = [8, 5, 4, 4]
_a = [2.2]
_a = 64
_a = 32_000
_a = 2_048
_a = False
_a = False
_a = False
elif model_name == "encodec_48khz":
_a = [8, 5, 4, 2]
_a = [3.0, 6.0, 12.0, 24.0]
_a = 48_000
_a = 2
_a = False
_a = "time_group_norm"
_a = True
_a = 1.0
_a = 0.01
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
_a = EncodecModel(lowerCamelCase__ )
_a = EncodecFeatureExtractor(
feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
feature_extractor.save_pretrained(lowerCamelCase__ )
_a = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
_a = original_checkpoint["best_state"]
recursively_load_weights(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("Pushing to the hub..." )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__snake_case : List[Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 691 | 0 |
'''simple docstring'''
import os
from pathlib import Path
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict, lowerCamelCase__ : List[str] ):
_a = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_a = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_a = F'''{src_lang}-{tgt_lang}'''
_a = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ )
_a = os.path.join(lowerCamelCase__, "README.md" )
print(F'''Generating {path}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(lowerCamelCase__ )
# make sure we are under the root of the project
__snake_case : int = Path(__file__).resolve().parent.parent.parent
__snake_case : int = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__snake_case : Any = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : int = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
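# Tokenizer and torch model classes are only registered when their optional
# dependencies are importable; otherwise the corresponding imports are skipped,
# so `from transformers import BloomModel` only works when torch is installed.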
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__snake_case : Union[str, Any] = 16
__snake_case : Optional[Any] = 32
def _lowercase ( lowerCamelCase__ : Accelerator, lowerCamelCase__ : int = 16, lowerCamelCase__ : str = "bert-base-cased" ):
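    # Download the tokenizer and the GLUE/MRPC dataset, then build train/eval
    # dataloaders (max-length padding on TPU, dynamic "longest" padding elsewhere).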
_a = AutoTokenizer.from_pretrained(lowerCamelCase__ )
_a = load_dataset("glue", "mrpc" )
def tokenize_function(lowerCamelCase__ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
_a = tokenizer(examples["sentence1"], examples["sentence2"], truncation=lowerCamelCase__, max_length=lowerCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_a = datasets.map(
lowerCamelCase__, batched=lowerCamelCase__, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=lowerCamelCase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_a = tokenized_datasets.rename_column("label", "labels" )
def collate_fn(lowerCamelCase__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCamelCase__, padding="max_length", max_length=128, return_tensors="pt" )
return tokenizer.pad(lowerCamelCase__, padding="longest", return_tensors="pt" )
# Instantiate dataloaders.
_a = DataLoader(
tokenized_datasets["train"], shuffle=lowerCamelCase__, collate_fn=lowerCamelCase__, batch_size=lowerCamelCase__ )
_a = DataLoader(
tokenized_datasets["validation"], shuffle=lowerCamelCase__, collate_fn=lowerCamelCase__, batch_size=lowerCamelCase__ )
return train_dataloader, eval_dataloader
def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : str ):
# Initialize accelerator
_a = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a = config["lr"]
_a = int(config["num_epochs"] )
_a = int(config["seed"] )
_a = int(config["batch_size"] )
_a = args.model_name_or_path
set_seed(lowerCamelCase__ )
_a , _a = get_dataloaders(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase__, return_dict=lowerCamelCase__ )
# Instantiate optimizer
_a = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_a = optimizer_cls(params=model.parameters(), lr=lowerCamelCase__ )
if accelerator.state.deepspeed_plugin is not None:
_a = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_a = 1
_a = (len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_a = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase__, num_warmup_steps=0, num_training_steps=lowerCamelCase__, )
else:
_a = DummyScheduler(lowerCamelCase__, total_num_steps=lowerCamelCase__, warmup_num_steps=0 )
# Prepare everything
    # There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
    # prepare method.
_a , _a , _a , _a , _a = accelerator.prepare(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
# We need to keep track of how many total steps we have iterated over
_a = 0
# We also need to keep track of the stating epoch so files are named properly
_a = 0
# Now we train the model
_a = evaluate.load("glue", "mrpc" )
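    # Track the best validation accuracy seen so far so it can be checked against
    # the optional performance lower bound once training finishes.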
_a = 0
_a = {}
for epoch in range(lowerCamelCase__, lowerCamelCase__ ):
model.train()
for step, batch in enumerate(lowerCamelCase__ ):
_a = model(**lowerCamelCase__ )
_a = outputs.loss
_a = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
_a = 0
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a = model(**lowerCamelCase__ )
_a = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once than multiple times
_a , _a = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowerCamelCase__ ) - 1:
_a = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_a = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowerCamelCase__, references=lowerCamelCase__, )
_a = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''', lowerCamelCase__ )
_a = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
_a = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, "all_results.json" ), "w" ) as f:
json.dump(lowerCamelCase__, lowerCamelCase__ )
def _lowercase ( ):
    _a = argparse.ArgumentParser(description="Simple example of a training script that checks model performance against an optional lower bound." )
parser.add_argument(
"--model_name_or_path", type=lowerCamelCase__, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=lowerCamelCase__, )
parser.add_argument(
"--output_dir", type=lowerCamelCase__, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
parser.add_argument(
"--performance_lower_bound", type=lowerCamelCase__, default=lowerCamelCase__, help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.", )
parser.add_argument(
"--num_epochs", type=lowerCamelCase__, default=3, help="Number of train epochs.", )
_a = parser.parse_args()
_a = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(lowerCamelCase__, lowerCamelCase__ )
if __name__ == "__main__":
main()
| 716 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=a ):
__UpperCAmelCase : int = ["""torch""", """scipy"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(self , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ["torch", "scipy"] )
| 691 | 0 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__snake_case : Union[str, Any] = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def _lowercase ( lowerCamelCase__ : List[Any] ):
_a = {}
state_dict.pop("pixel_mean", lowerCamelCase__ )
state_dict.pop("pixel_std", lowerCamelCase__ )
_a = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_a = key.replace(lowerCamelCase__, lowerCamelCase__ )
if re.match(lowerCamelCase__, lowerCamelCase__ ):
_a = int(re.match(lowerCamelCase__, lowerCamelCase__ ).group(2 ) )
if layer_nb == 0:
_a = key.replace("layers.0", "proj_in" )
elif layer_nb == 1:
_a = key.replace("layers.1", "layers.0" )
elif layer_nb == 2:
_a = key.replace("layers.2", "proj_out" )
_a = value
_a = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, lowerCamelCase__ : str="ybelkada/segment-anything" ):
_a = hf_hub_download(lowerCamelCase__, F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
_a = SamConfig()
elif "sam_vit_l" in model_name:
_a = SamVisionConfig(
hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
_a = SamConfig(
vision_config=lowerCamelCase__, )
elif "sam_vit_h" in model_name:
_a = SamVisionConfig(
hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
_a = SamConfig(
vision_config=lowerCamelCase__, )
_a = torch.load(lowerCamelCase__, map_location="cpu" )
_a = replace_keys(lowerCamelCase__ )
_a = SamImageProcessor()
_a = SamProcessor(image_processor=lowerCamelCase__ )
_a = SamModel(lowerCamelCase__ )
hf_model.load_state_dict(lowerCamelCase__ )
_a = hf_model.to("cuda" )
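    # Smoke-test the converted weights: run prompt-free, single-point, box, and
    # two-point queries on a sample image and compare IoU scores against references.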
_a = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
_a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw ).convert("RGB" )
_a = [[[400, 650]]]
_a = [[1]]
_a = processor(images=np.array(lowerCamelCase__ ), return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
_a = processor(
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
_a = ((75, 275, 1_725, 850),)
_a = processor(images=np.array(lowerCamelCase__ ), input_boxes=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
_a = [[[400, 650], [800, 650]]]
_a = [[1, 1]]
_a = processor(
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
__snake_case : Optional[Any] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
__snake_case : str = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 717 |
'''simple docstring'''
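# Pinned version specifiers for transformers' dependencies, keyed by package name;
# consumed by the runtime dependency version checks.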
__snake_case : Dict = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 691 | 0 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
__snake_case : Optional[int] = logging.get_logger(__name__)
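# Each mapping below pairs a model type string with the name of the Flax class that
# implements it for a given task; _LazyAutoMapping resolves the classes on first use.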
__snake_case : Dict = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
__snake_case : Optional[int] = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
__snake_case : Optional[int] = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
__snake_case : str = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
__snake_case : Any = OrderedDict(
[
        # Model for Image Classification mapping
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
__snake_case : Tuple = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
__snake_case : int = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
__snake_case : Any = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
__snake_case : Tuple = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
__snake_case : Dict = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
__snake_case : Optional[int] = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
__snake_case : List[str] = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
__snake_case : Optional[Any] = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
__snake_case : Union[str, Any] = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
__snake_case : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__snake_case : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__snake_case : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__snake_case : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__snake_case : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__snake_case : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__snake_case : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__snake_case : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__snake_case : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__snake_case : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__snake_case : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__snake_case : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__snake_case : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__snake_case : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Union[str, Any] = FLAX_MODEL_MAPPING
__snake_case : Union[str, Any] = auto_class_update(FlaxAutoModel)
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Any = FLAX_MODEL_FOR_PRETRAINING_MAPPING
__snake_case : Optional[int] = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Optional[int] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
__snake_case : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Union[str, Any] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
__snake_case : Dict = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : List[str] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__snake_case : Optional[int] = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : List[Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__snake_case : List[str] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Optional[int] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
__snake_case : Dict = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : List[Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__snake_case : Union[str, Any] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
__snake_case : str = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : str = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
__snake_case : Tuple = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Optional[Any] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__snake_case : Any = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Dict = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
__snake_case : List[str] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class A ( _BaseAutoModelClass ):
__UpperCAmelCase : Optional[int] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
__snake_case : Union[str, Any] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 718 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = ProphetNetTokenizer
__UpperCAmelCase : Optional[Any] = False
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
_a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __lowerCAmelCase ( self , snake_case_ ) -> Any:
_a = "UNwant\u00E9d,running"
_a = "unwanted, running"
return input_text, output_text
def __lowerCAmelCase ( self ) -> Any:
_a = self.tokenizer_class(self.vocab_file )
_a = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> int:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_a = {}
for i, token in enumerate(snake_case_ ):
_a = i
_a = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_a = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
_a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
_a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ )
_a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 691 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A ( a , unittest.TestCase ):
__UpperCAmelCase : int = KandinskyImgaImgPipeline
__UpperCAmelCase : Union[str, Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
__UpperCAmelCase : str = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
__UpperCAmelCase : Any = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__UpperCAmelCase : Optional[int] = False
@property
def __lowerCAmelCase ( self ) -> Dict:
return 3_2
@property
def __lowerCAmelCase ( self ) -> int:
return 3_2
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
return 1_0_0
@property
def __lowerCAmelCase ( self ) -> Tuple:
_a = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def __lowerCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
_a = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
_a = MultilingualCLIP(snake_case_ )
_a = text_encoder.eval()
return text_encoder
@property
def __lowerCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
_a = {
"in_channels": 4,
            # Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_a = UNetaDConditionModel(**snake_case_ )
return model
@property
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_a = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_unet
_a = self.dummy_movq
_a = {
"num_train_timesteps": 1_0_0_0,
"beta_schedule": "linear",
"beta_start": 0.00_085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_a = DDIMScheduler(**snake_case_ )
_a = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
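    # Build deterministic dummy inputs: random image embeddings plus a 64x64 init
    # image resized to 256x256, with a seeded generator for reproducibility.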
def __lowerCAmelCase ( self , snake_case_ , snake_case_=0 ) -> Tuple:
_a = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_a = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(snake_case_ )
# create init_image
_a = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(snake_case_ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
if str(snake_case_ ).startswith("mps" ):
_a = torch.manual_seed(snake_case_ )
else:
_a = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_a = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 1_0,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = "cpu"
_a = self.get_dummy_components()
_a = self.pipeline_class(**snake_case_ )
_a = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_a = pipe(**self.get_dummy_inputs(snake_case_ ) )
_a = output.images
_a = pipe(
**self.get_dummy_inputs(snake_case_ ) , return_dict=snake_case_ , )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_a = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_img2img_frog.npy" )
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
_a = "A red cartoon frog, 4k"
_a = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case_ )
_a = KandinskyImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa )
_a = pipeline.to(snake_case_ )
pipeline.set_progress_bar_config(disable=snake_case_ )
_a = torch.Generator(device="cpu" ).manual_seed(0 )
_a , _a = pipe_prior(
snake_case_ , generator=snake_case_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
_a = pipeline(
snake_case_ , image=snake_case_ , image_embeds=snake_case_ , negative_image_embeds=snake_case_ , generator=snake_case_ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="np" , )
_a = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(snake_case_ , snake_case_ )
| 719 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def _lowercase ( ):
_a = argparse.ArgumentParser()
parser.add_argument("--model_ckpt", type=lowerCamelCase__, default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs", type=lowerCamelCase__, default=5 )
parser.add_argument("--batch_size", type=lowerCamelCase__, default=6 )
parser.add_argument("--gradient_accumulation_steps", type=lowerCamelCase__, default=1 )
parser.add_argument("--freeze", type=lowerCamelCase__, default=lowerCamelCase__ )
parser.add_argument("--learning_rate", type=lowerCamelCase__, default=5e-4 )
parser.add_argument("--seed", type=lowerCamelCase__, default=0 )
parser.add_argument("--lr_scheduler_type", type=lowerCamelCase__, default="cosine" )
parser.add_argument("--num_warmup_steps", type=lowerCamelCase__, default=10 )
parser.add_argument("--weight_decay", type=lowerCamelCase__, default=0.01 )
parser.add_argument("--output_dir", type=lowerCamelCase__, default="./results" )
return parser.parse_args()
__snake_case : str = load("accuracy")
def _lowercase ( lowerCamelCase__ : List[str] ):
_a , _a = eval_pred
_a = np.argmax(lowerCamelCase__, axis=1 )
return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ )
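# Callback that re-runs evaluation on the training set whenever the trainer
# evaluates, so train accuracy is logged alongside validation accuracy.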
class A ( a ):
def __init__( self , snake_case_ ) -> None:
super().__init__()
_a = trainer
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ) -> Optional[int]:
if control.should_evaluate:
_a = deepcopy(snake_case_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def _lowercase ( ):
_a = get_args()
set_seed(args.seed )
_a = load_dataset("codeparrot/codecomplex", split="train" )
_a = dataset.train_test_split(test_size=0.2 )
_a = train_test["test"].train_test_split(test_size=0.5 )
_a = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
_a = AutoTokenizer.from_pretrained(args.model_ckpt )
_a = tokenizer.eos_token
_a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
_a = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_a = False
_a = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(lowerCamelCase__ : Tuple ):
_a = tokenizer(example["src"], truncation=lowerCamelCase__, max_length=1_024 )
_a = labels.straint(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_a = train_test_validation.map(
lowerCamelCase__, batched=lowerCamelCase__, remove_columns=train_test_validation["train"].column_names, )
_a = DataCollatorWithPadding(tokenizer=lowerCamelCase__ )
_a = TrainingArguments(
output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
_a = Trainer(
model=lowerCamelCase__, args=lowerCamelCase__, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=lowerCamelCase__, data_collator=lowerCamelCase__, compute_metrics=lowerCamelCase__, )
print("Training..." )
trainer.add_callback(CustomCallback(lowerCamelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
| 691 | 0 |
'''simple docstring'''
class A :
def __init__( self ) -> List[str]:
_a = 0
_a = 0
_a = {}
def __lowerCAmelCase ( self , snake_case_ ) -> int:
if vertex not in self.adjacency:
_a = {}
self.num_vertices += 1
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
self.add_vertex(snake_case_ )
self.add_vertex(snake_case_ )
if head == tail:
return
_a = weight
_a = weight
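    # Nudge duplicate edge weights apart so every weight is distinct; the Boruvka
    # implementation below relies on each component having a unique cheapest edge.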
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for i in range(len(snake_case_ ) ):
_a = list(edges[i] )
edges.sort(key=lambda snake_case_ : e[2] )
for i in range(len(snake_case_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_a = edges[i][2] + 1
for edge in edges:
_a , _a , _a = edge
_a = weight
_a = weight
def __str__( self ) -> Optional[int]:
_a = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_a = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __lowerCAmelCase ( self ) -> Any:
return self.adjacency.keys()
@staticmethod
def __lowerCAmelCase ( snake_case_=None , snake_case_=None ) -> Any:
_a = Graph()
if vertices is None:
_a = []
if edges is None:
_a = []
for vertex in vertices:
g.add_vertex(snake_case_ )
for edge in edges:
g.add_edge(*snake_case_ )
return g
class A :
def __init__( self ) -> Optional[int]:
_a = {}
_a = {}
def __len__( self ) -> List[Any]:
return len(self.parent )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
if item in self.parent:
return self.find(snake_case_ )
_a = item
_a = 0
return item
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[Any]:
if item not in self.parent:
return self.make_set(snake_case_ )
if item != self.parent[item]:
_a = self.find(self.parent[item] )
return self.parent[item]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Optional[int]:
_a = self.find(snake_case_ )
_a = self.find(snake_case_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_a = roota
return roota
if self.rank[roota] < self.rank[roota]:
_a = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_a = roota
return roota
return None
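    # Boruvka's algorithm: repeatedly merge each component along its cheapest
    # outgoing edge until a single component (the minimum spanning tree) remains.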
@staticmethod
def __lowerCAmelCase ( snake_case_ ) -> Tuple:
_a = graph.num_vertices
_a = Graph.UnionFind()
_a = []
while num_components > 1:
_a = {}
for vertex in graph.get_vertices():
_a = -1
_a = graph.get_edges()
for edge in edges:
_a , _a , _a = edge
edges.remove((tail, head, weight) )
for edge in edges:
_a , _a , _a = edge
_a = union_find.find(snake_case_ )
_a = union_find.find(snake_case_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_a = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_a , _a , _a = cheap_edge[vertex]
if union_find.find(snake_case_ ) != union_find.find(snake_case_ ):
union_find.union(snake_case_ , snake_case_ )
mst_edges.append(cheap_edge[vertex] )
_a = num_components - 1
_a = Graph.build(edges=snake_case_ )
return mst
| 720 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _lowercase ( lowerCamelCase__ : Any, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Dict, lowerCamelCase__ : List[str] ):
_a = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_a = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
_a = F'''{src_lang}-{tgt_lang}'''
_a = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ )
_a = os.path.join(lowerCamelCase__, "README.md" )
print(F'''Generating {path}''' )
with open(lowerCamelCase__, "w", encoding="utf-8" ) as f:
f.write(lowerCamelCase__ )
# make sure we are under the root of the project
__snake_case : int = Path(__file__).resolve().parent.parent.parent
__snake_case : int = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__snake_case : Any = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 691 | 0 |
'''simple docstring'''
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
__snake_case : Optional[Any] = "Create a default config file for Accelerate with only a few flags set."
def _lowercase ( lowerCamelCase__ : List[Any]="no", lowerCamelCase__ : str = default_json_config_file, lowerCamelCase__ : bool = False ):
_a = Path(lowerCamelCase__ )
path.parent.mkdir(parents=lowerCamelCase__, exist_ok=lowerCamelCase__ )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
_a = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
_a = {
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if torch.cuda.is_available():
_a = torch.cuda.device_count()
_a = num_gpus
_a = False
if num_gpus > 1:
_a = "MULTI_GPU"
else:
_a = "NO"
elif is_xpu_available() and use_xpu:
_a = torch.xpu.device_count()
_a = num_xpus
_a = False
if num_xpus > 1:
_a = "MULTI_XPU"
else:
_a = "NO"
elif is_npu_available():
_a = torch.npu.device_count()
_a = num_npus
_a = False
if num_npus > 1:
_a = "MULTI_NPU"
else:
_a = "NO"
else:
_a = 0
_a = True
_a = 1
_a = "NO"
_a = ClusterConfig(**lowerCamelCase__ )
config.to_json_file(lowerCamelCase__ )
return path
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[int] ):
_a = parser.add_parser("default", parents=lowerCamelCase__, help=lowerCamelCase__, formatter_class=lowerCamelCase__ )
parser.add_argument(
"--config_file", default=lowerCamelCase__, help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
), dest="save_location", )
parser.add_argument(
"--mixed_precision", choices=["no", "fp16", "bf16"], type=lowerCamelCase__, help="Whether or not to use mixed precision training. "
"Choose between FP16 and BF16 (bfloat16) training. "
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", default="no", )
parser.set_defaults(func=lowerCamelCase__ )
return parser
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = write_basic_config(args.mixed_precision, args.save_location )
if config_file:
print(F'''accelerate configuration saved at {config_file}''' )
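# For reference, a call like write_basic_config("fp16") on a single-GPU machine
# would emit a JSON file roughly of this shape. This is a sketch: the exact key
# set is whatever ClusterConfig serializes, so do not treat it as canonical.
#
#     {
#         "compute_environment": "LOCAL_MACHINE",
#         "mixed_precision": "fp16",
#         "num_processes": 1,
#         "use_cpu": false,
#         "distributed_type": "NO"
#     }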
| 721 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
__snake_case : str = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
__snake_case : Dict = {
"RUCAIBox/mvp": 1024,
}
class A ( a ):
__UpperCAmelCase : int = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[Any] = MvpTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> List[str]:
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = getattr(snake_case_ , pre_tok_state.pop("type" ) )
_a = add_prefix_space
_a = pre_tok_class(**snake_case_ )
_a = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_a = "post_processor"
_a = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state["sep"] )
if "cls" in state:
_a = tuple(state["cls"] )
_a = False
if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get("trim_offsets" , snake_case_ ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(snake_case_ , state.pop("type" ) )
_a = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , snake_case_ ) -> List[Any]:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
_a = value
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
_a = kwargs.get("is_split_into_words" , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Optional[Any]:
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
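# A hedged usage sketch for the class above. MvpTokenizerFast is the name this
# tokenizer is exported under in transformers, and "RUCAIBox/mvp" is the
# checkpoint listed in the pretrained maps; network access is assumed, so the
# call is wrapped in a helper rather than run at import time.
def _mvp_usage_sketch():
    from transformers import MvpTokenizerFast

    tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    # add_prefix_space handling mirrors the GPT-2 byte-level pre-tokenizer
    return tok("Summarize: the weather was lovely.", return_tensors="pt")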
| 691 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[str] = logging.get_logger(__name__)
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Optional[int]=False ):
_a = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_a = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Optional[Any], lowerCamelCase__ : int=False ):
for i in range(config.num_hidden_layers ):
if base_model:
_a = ""
else:
_a = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_a = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
_a = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_a = in_proj_weight[
: config.hidden_size, :
]
_a = in_proj_bias[: config.hidden_size]
_a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_a = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_a = in_proj_weight[
-config.hidden_size :, :
]
_a = in_proj_bias[-config.hidden_size :]
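# The q/k/v split performed by the function above can be shown in isolation:
# timm stores one fused (3 * hidden, hidden) projection matrix, which slices
# cleanly into three (hidden, hidden) blocks. A small sketch with illustrative
# names (any sliceable array, e.g. a torch.Tensor, works):
def _split_fused_qkv(in_proj_weight, hidden_size):
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : 2 * hidden_size, :]
    value = in_proj_weight[2 * hidden_size :, :]
    return query, key, value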
def _lowercase ( lowerCamelCase__ : Dict ):
_a = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(lowerCamelCase__, lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : str, lowerCamelCase__ : Union[str, Any] ):
_a = dct.pop(lowerCamelCase__ )
_a = val
def _lowercase ( ):
_a = "http://images.cocodataset.org/val2017/000000039769.jpg"
_a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : List[Any], lowerCamelCase__ : Optional[Any]=False ):
_a = BitConfig(
global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=lowerCamelCase__, )
_a = ViTHybridConfig(backbone_config=lowerCamelCase__, image_size=384, num_labels=1_000 )
_a = False
# load original model from timm
_a = timm.create_model(lowerCamelCase__, pretrained=lowerCamelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_a = timm_model.state_dict()
if base_model:
remove_classification_head_(lowerCamelCase__ )
_a = create_rename_keys(lowerCamelCase__, lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
_a = "huggingface/label-files"
_a = "imagenet-1k-id2label.json"
_a = json.load(open(hf_hub_download(lowerCamelCase__, lowerCamelCase__, repo_type="dataset" ), "r" ) )
_a = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_a = ViTHybridModel(lowerCamelCase__ ).eval()
else:
_a = ViTHybridForImageClassification(lowerCamelCase__ ).eval()
model.load_state_dict(lowerCamelCase__ )
# create image processor
_a = create_transform(**resolve_data_config({}, model=lowerCamelCase__ ) )
_a = transform.transforms
_a = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
_a = ViTHybridImageProcessor(
do_resize=lowerCamelCase__, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=lowerCamelCase__, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=lowerCamelCase__, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
_a = prepare_img()
_a = transform(lowerCamelCase__ ).unsqueeze(0 )
_a = processor(lowerCamelCase__, return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(lowerCamelCase__, lowerCamelCase__ )
# verify logits
with torch.no_grad():
_a = model(lowerCamelCase__ )
_a = outputs.logits
print("Predicted class:", logits.argmax(-1 ).item() )
if base_model:
_a = timm_model.forward_features(lowerCamelCase__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowerCamelCase__, outputs.pooler_output, atol=1e-3 )
else:
_a = timm_model(lowerCamelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCamelCase__, outputs.logits, atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase__ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print(F'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(F'''ybelkada/{vit_name}''' )
processor.push_to_hub(F'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
__snake_case : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
__snake_case : Any = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 700 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__snake_case : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__snake_case : Optional[Any] = 12_8022
__snake_case : List[str] = 12_8028
@require_sentencepiece
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = MaMaaaTokenizer
__UpperCAmelCase : int = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Tuple = True
def __lowerCAmelCase ( self ) -> Any:
super().setUp()
_a = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_a = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_a = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
_a = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self , **snake_case_ ) -> str:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
return (
"This is a test",
"This is a test",
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "</s>"
_a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.get_tokenizer()
_a = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(snake_case_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __lowerCAmelCase ( self ) -> Any:
pass
def __lowerCAmelCase ( self ) -> Dict:
_a = self.get_tokenizer()
_a = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_ ) , [2, 3, 4, 5, 6] , )
_a = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
_a = tokenizer.convert_tokens_to_string(snake_case_ )
self.assertEqual(snake_case_ , "This is a test" )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
# fmt: off
_a = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
__UpperCAmelCase : Any = """facebook/m2m100_418M"""
__UpperCAmelCase : Dict = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
__UpperCAmelCase : Optional[Any] = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
__UpperCAmelCase : Any = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def __lowerCAmelCase ( cls ) -> int:
_a = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
_a = 1
return cls
def __lowerCAmelCase ( self ) -> Any:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.tokenizer.get_vocab()
self.assertEqual(len(snake_case_ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = "en"
_a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.assertIn(snake_case_ , self.tokenizer.all_special_ids )
# fmt: off
_a = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
_a = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
_a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertNotIn(self.tokenizer.eos_token , snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = tempfile.mkdtemp()
_a = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(snake_case_ )
_a = MaMaaaTokenizer.from_pretrained(snake_case_ )
self.assertDictEqual(new_tok.lang_token_to_id , snake_case_ )
@require_torch
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = "en"
_a = "fr"
_a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case_ , return_tensors="pt" )
_a = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
_a = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
_a = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __lowerCAmelCase ( self ) -> List[Any]:
_a = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
_a = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowerCAmelCase ( self ) -> int:
_a = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(snake_case_ ) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
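# A standalone sketch of the contract that shift_tokens_right fulfils in the
# tests above: decoder inputs are the labels rotated one step right, with the
# start token (EOS for M2M100) in front and any -100 padding restored to the
# pad id. List-based toy version, not the library implementation.
def _shift_right_sketch(labels, pad_token_id, decoder_start_token_id):
    shifted = [[decoder_start_token_id] + row[:-1] for row in labels]
    return [[tok if tok != -100 else pad_token_id for tok in row] for row in shifted]

# _shift_right_sketch([[128028, 5364, 2]], pad_token_id=1, decoder_start_token_id=2)
# yields [[2, 128028, 5364]], matching the decoder_input_ids checks above.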
| 691 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__snake_case : Optional[int] = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__snake_case : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
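# Conceptually, _LazyModule defers the heavy imports above until an attribute
# is first accessed, much like a PEP 562 module-level __getattr__. A miniature
# illustration (names are made up; kept as a comment so it cannot clash with
# the _LazyModule instance installed just above):
#
#     import importlib
#
#     _LAZY = {"Speech2TextModel": ".modeling_speech_to_text"}
#
#     def __getattr__(name):
#         if name in _LAZY:
#             module = importlib.import_module(_LAZY[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(name)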
| 701 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : int = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A ( a ):
__UpperCAmelCase : Union[str, Any] = """wav2vec2"""
def __init__( self , snake_case_=3_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(1_0, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=1_2_8 , snake_case_=1_6 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=1_0 , snake_case_=2 , snake_case_=0.0 , snake_case_=1_0 , snake_case_=0 , snake_case_=3_2_0 , snake_case_=2 , snake_case_=0.1 , snake_case_=1_0_0 , snake_case_=2_5_6 , snake_case_=2_5_6 , snake_case_=0.1 , snake_case_="sum" , snake_case_=False , snake_case_=False , snake_case_=2_5_6 , snake_case_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , snake_case_=(5, 3, 3, 1, 1) , snake_case_=(1, 2, 3, 1, 1) , snake_case_=5_1_2 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=False , snake_case_=3 , snake_case_=2 , snake_case_=3 , snake_case_=None , snake_case_=None , **snake_case_ , ) -> List[str]:
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
_a = hidden_size
_a = feat_extract_norm
_a = feat_extract_activation
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = conv_bias
_a = num_conv_pos_embeddings
_a = num_conv_pos_embedding_groups
_a = len(self.conv_dim )
_a = num_hidden_layers
_a = intermediate_size
_a = hidden_act
_a = num_attention_heads
_a = hidden_dropout
_a = attention_dropout
_a = activation_dropout
_a = feat_proj_dropout
_a = final_dropout
_a = layerdrop
_a = layer_norm_eps
_a = initializer_range
_a = vocab_size
_a = do_stable_layer_norm
_a = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a = apply_spec_augment
_a = mask_time_prob
_a = mask_time_length
_a = mask_time_min_masks
_a = mask_feature_prob
_a = mask_feature_length
_a = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_a = num_codevectors_per_group
_a = num_codevector_groups
_a = contrastive_logits_temperature
_a = feat_quantizer_dropout
_a = num_negatives
_a = codevector_dim
_a = proj_codevector_dim
_a = diversity_loss_weight
# ctc loss
_a = ctc_loss_reduction
_a = ctc_zero_infinity
# adapter
_a = add_adapter
_a = adapter_kernel_size
_a = adapter_stride
_a = num_adapter_layers
_a = output_hidden_size or hidden_size
_a = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_a = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = list(snake_case_ )
_a = xvector_output_dim
@property
def __lowerCAmelCase ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
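# The property above multiplies the convolutional strides together, which is
# the downsampling factor between raw audio samples and encoder frames. With
# the default strides (5, 2, 2, 2, 2, 2, 2) that is 5 * 2**6 = 320, i.e. one
# frame per 320 samples, or 20 ms of audio at 16 kHz. Quick sanity check using
# the helpers already imported at the top of this file:
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320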
| 691 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : List[str] = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class A ( a ):
__UpperCAmelCase : Union[str, Any] = """mgp-str"""
def __init__( self , snake_case_=[3_2, 1_2_8] , snake_case_=4 , snake_case_=3 , snake_case_=2_7 , snake_case_=3_8 , snake_case_=5_0_2_5_7 , snake_case_=3_0_5_2_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=4.0 , snake_case_=True , snake_case_=False , snake_case_=1E-5 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=False , snake_case_=0.02 , **snake_case_ , ) -> Tuple:
super().__init__(**snake_case_ )
_a = image_size
_a = patch_size
_a = num_channels
_a = max_token_length
_a = num_character_labels
_a = num_bpe_labels
_a = num_wordpiece_labels
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = mlp_ratio
_a = distilled
_a = layer_norm_eps
_a = drop_rate
_a = qkv_bias
_a = attn_drop_rate
_a = drop_path_rate
_a = output_aa_attentions
_a = initializer_range
| 702 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number | (1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number & ~(1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return number ^ (1 << position)
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return ((number >> position) & 1) == 1
def _lowercase ( lowerCamelCase__ : int, lowerCamelCase__ : int ):
return int((number & (1 << position)) != 0 )
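# Worked examples for the helpers above, in definition order. Note that every
# helper in this file shares the name _lowercase, so each definition shadows
# the previous one and only the last (get_bit) is callable at module level:
#   set_bit(0b1101, 1)    -> 0b1111 (15)
#   clear_bit(0b1101, 2)  -> 0b1001 (9)
#   flip_bit(0b1101, 1)   -> 0b1111 (15)
#   is_bit_set(0b1101, 3) -> True
#   get_bit(0b1101, 1)    -> 0
assert _lowercase(0b1101, 1) == 0  # the surviving definition behaves as get_bit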
if __name__ == "__main__":
import doctest
doctest.testmod()
| 691 | 0 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__snake_case : Optional[int] = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
__snake_case : Optional[int] = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
__snake_case : Optional[Any] = R"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> int:
        n_correct = 0.0
        for i, j in zip(snake_case_ , snake_case_ ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(snake_case_ )
return {
"accuracy": accuracy,
}
| 703 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__snake_case : List[Any] = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Union[str, Any]=None, lowerCamelCase__ : Dict=None, lowerCamelCase__ : Optional[int]=None ):
_a = True
while ask_again:
_a = input(lowerCamelCase__ )
try:
if default is not None and len(lowerCamelCase__ ) == 0:
return default
return convert_value(lowerCamelCase__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any], lowerCamelCase__ : Dict=[], lowerCamelCase__ : int=None, lowerCamelCase__ : Union[str, Any]=0 ):
_a = BulletMenu(lowerCamelCase__, lowerCamelCase__ )
_a = menu.run(default_choice=lowerCamelCase__ )
return convert_value(lowerCamelCase__ ) if convert_value is not None else result
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _lowercase ( lowerCamelCase__ : Dict ):
_a = int(lowerCamelCase__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _lowercase ( lowerCamelCase__ : List[Any] ):
_a = int(lowerCamelCase__ )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _lowercase ( lowerCamelCase__ : str ):
_a = int(lowerCamelCase__ )
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _lowercase ( lowerCamelCase__ : str ):
return {"yes": True, "no": False}[value.lower()]
class A ( argparse.RawDescriptionHelpFormatter ):
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
_a = super()._format_usage(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
_a = usage.replace("<command> [<args>] " , "" )
return usage
| 691 | 0 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__snake_case : Tuple = logging.getLogger(__name__)
class A ( a ):
def __init__( self , snake_case_=-1 ) -> Union[str, Any]:
# in NER datasets, the last column is usually reserved for NER label
_a = label_idx
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> List[InputExample]:
if isinstance(snake_case_ , snake_case_ ):
_a = mode.value
_a = os.path.join(snake_case_ , F'''{mode}.txt''' )
_a = 1
_a = []
with open(snake_case_ , encoding="utf-8" ) as f:
_a = []
_a = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=snake_case_ , labels=snake_case_ ) )
guid_index += 1
_a = []
_a = []
else:
_a = line.split(" " )
words.append(splits[0] )
if len(snake_case_ ) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=snake_case_ , labels=snake_case_ ) )
return examples
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> int:
_a = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
writer.write(snake_case_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_a = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
writer.write(snake_case_ )
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
if path:
with open(snake_case_ , "r" ) as f:
_a = f.read().splitlines()
if "O" not in labels:
_a = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class A ( a ):
def __init__( self ) -> Tuple:
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
if path:
with open(snake_case_ , "r" ) as f:
_a = f.read().splitlines()
if "O" not in labels:
_a = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class A ( a ):
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> List[InputExample]:
if isinstance(snake_case_ , snake_case_ ):
_a = mode.value
_a = os.path.join(snake_case_ , F'''{mode}.txt''' )
_a = 1
_a = []
with open(snake_case_ , encoding="utf-8" ) as f:
for sentence in parse_incr(snake_case_ ):
_a = []
_a = []
for token in sentence:
words.append(token["form"] )
labels.append(token["upos"] )
assert len(snake_case_ ) == len(snake_case_ )
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=snake_case_ , labels=snake_case_ ) )
guid_index += 1
return examples
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Any:
_a = 0
for sentence in parse_incr(snake_case_ ):
_a = preds_list[example_id]
_a = ""
for token in sentence:
out += F'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(snake_case_ )
example_id += 1
def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
if path:
with open(snake_case_ , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
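# A minimal sketch of the CoNLL-style parsing done by the readers above: blank
# lines delimit sentences, the first column is the token and the configured
# column holds the tag. Standard library only; names are illustrative.
def _parse_conll_sketch(lines, label_idx=-1):
    sentences, words, labels = [], [], []
    for line in lines:
        if not line.strip() or line.startswith("-DOCSTART-"):
            if words:
                sentences.append((words, labels))
                words, labels = [], []
            continue
        cols = line.split()
        words.append(cols[0])
        labels.append(cols[label_idx] if len(cols) > 1 else "O")
    if words:
        sentences.append((words, labels))
    return sentences

# _parse_conll_sketch(["EU B-ORG", "rejects O", "", "German B-MISC"]) gives
# [(["EU", "rejects"], ["B-ORG", "O"]), (["German"], ["B-MISC"])]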
| 704 |
'''simple docstring'''
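# The solver below is Gaussian elimination in two phases: simplify() divides
# each row by its leading coefficient and subtracts the first row to zero out
# the first column, recursing on the smaller system (forward elimination);
# solve_simultaneous() then walks the simplified rows in reverse and
# back-substitutes the solutions found so far. Each row holds the n
# coefficients followed by the constant term as its final element.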
def _lowercase ( lowerCamelCase__ : list[list] ):
_a = current_set.copy()
for row_index, row in enumerate(lowerCamelCase__ ):
_a = row[0]
for column_index, column in enumerate(lowerCamelCase__ ):
if magnitude == 0:
_a = column
continue
_a = column / magnitude
# Subtract to cancel term
_a = current_set[0]
_a = [first_row]
_a = current_set[1::]
for row in current_set:
_a = []
        # If the first term is 0, the row is already in the form we want, so preserve it
if row[0] == 0:
final_set.append(lowerCamelCase__ )
continue
for column_index in range(len(lowerCamelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCamelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_a = final_set[0]
_a = []
_a = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_a = simplify(lowerCamelCase__ )
for i in range(len(lowerCamelCase__ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, lowerCamelCase__ )
_a = resultant
return final_set
def _lowercase ( lowerCamelCase__ : list[list] ):
if len(lowerCamelCase__ ) == 0:
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
_a = len(lowerCamelCase__ ) + 1
if any(len(lowerCamelCase__ ) != _length for item in equations ):
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
for row in equations:
if any(not isinstance(lowerCamelCase__, (int, float) ) for column in row ):
raise ValueError("solve_simultaneous() requires lists of integers" )
if len(lowerCamelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
_a = equations.copy()
if any(0 in row for row in data_set ):
_a = data_set.copy()
_a = []
for row_index, row in enumerate(lowerCamelCase__ ):
if 0 not in row:
_a = data_set.pop(lowerCamelCase__ )
break
if not full_row:
raise ValueError("solve_simultaneous() requires at least 1 full equation" )
data_set.insert(0, lowerCamelCase__ )
_a = data_set.copy()
_a = simplify(lowerCamelCase__ )
_a = simplified[::-1]
_a = []
for row in simplified:
_a = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_a = row.copy()[: len(lowerCamelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCamelCase__ ) == 0:
solutions.append(0 )
continue
_a = temp_row[1::]
_a = temp_row[::-1]
for column_index, column in enumerate(lowerCamelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCamelCase__ )
_a = []
for item in solutions:
final.append(float(round(lowerCamelCase__, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case : Tuple = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 691 | 0 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = len(lowerCamelCase__ )
for i in range(length - 1 ):
_a = i
for k in range(i + 1, lowerCamelCase__ ):
if collection[k] < collection[least]:
_a = k
if least != i:
_a , _a = (collection[i], collection[least])
return collection
if __name__ == "__main__":
__snake_case : Optional[Any] = input("Enter numbers separated by a comma:\n").strip()
__snake_case : List[str] = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
| 705 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _lowercase ( lowerCamelCase__ : Optional[int] ): # picklable for multiprocessing
return x.sum()
def _lowercase ( lowerCamelCase__ : int ): # picklable for multiprocessing
return i + 1
@dataclass
class A :
__UpperCAmelCase : int
__UpperCAmelCase : str
class A ( a ):
def __lowerCAmelCase ( self ) -> Tuple:
_a = {}
_a = []
_a = 1
_a = [1, 2]
_a = {"a": 1, "b": 2}
_a = {"a": [1, 2], "b": [3, 4]}
_a = {"a": {"1": 1}, "b": 2}
_a = {"a": 1, "b": 2, "c": 3, "d": 4}
_a = {}
_a = []
_a = 2
_a = [2, 3]
_a = {"a": 2, "b": 3}
_a = {"a": [2, 3], "b": [4, 5]}
_a = {"a": {"1": 2}, "b": 3}
_a = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ ) , snake_case_ )
_a = 2
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_ ) , snake_case_ )
_a = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
_a = {"a": 2, "b": 0, "c": 2}
_a = {
"a": np.eye(2 ).astype(snake_case_ ),
"b": np.zeros(3 ).astype(snake_case_ ),
"c": np.ones(2 ).astype(snake_case_ ),
}
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ) , snake_case_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(snake_case_ ): # can't pickle a local lambda
map_nested(lambda snake_case_ : x + 1 , snake_case_ , num_proc=snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = {"a": 1, "b": 2}
_a = {"a": 3, "b": 4}
_a = {"a": 5, "b": 6}
_a = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(snake_case_ , snake_case_ , snake_case_ ) ) , snake_case_ )
def __lowerCAmelCase ( self ) -> str:
class A :
__UpperCAmelCase : Optional[int] = """bar"""
_a = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(snake_case_ , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def _lowercase ( iterable_length : Any, num_proc : Dict, expected_num_proc : Optional[int] ):
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {F'''{i}''': i for i in range(iterable_length )}
        _a = map_nested(lambda x : x + 10, data_struct, num_proc=num_proc, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class A ( a ):
@require_tf
def __lowerCAmelCase ( self ) -> Any:
import tensorflow as tf
from tensorflow.keras import layers
_a = layers.Dense(2 )
def gen_random_output():
_a = tf.random.uniform((1, 3) )
return model(snake_case_ ).numpy()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_tensorflow=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import torch
def gen_random_output():
_a = torch.nn.Linear(3 , 2 )
_a = torch.rand(1 , 3 )
return model(snake_case_ ).detach().numpy()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
with temp_seed(4_2 , set_pytorch=snake_case_ ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __lowerCAmelCase ( self ) -> Optional[int]:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
_a = gen_random_output()
with temp_seed(4_2 ):
_a = gen_random_output()
_a = gen_random_output()
np.testing.assert_equal(snake_case_ , snake_case_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data", [{}] )
def _lowercase ( lowerCamelCase__ : Any ):
_a = NestedDataStructure(lowerCamelCase__ ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def _lowercase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : Dict ):
_a = NestedDataStructure(lowerCamelCase__ ).flatten()
assert output == expected_output
def _lowercase ( ):
_a = A(x=1, y="foobar" )
_a = {"x": 1, "y": "foobar"}
assert asdict(lowerCamelCase__ ) == expected_output
_a = {"a": {"b": A(x=10, y="foo" )}, "c": [A(x=20, y="bar" )]}
_a = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(lowerCamelCase__ ) == expected_output
with pytest.raises(lowerCamelCase__ ):
asdict([1, A(x=10, y="foo" )] )
def _lowercase ( lowerCamelCase__ : str ):
return text.split()
def _lowercase ( lowerCamelCase__ : List[Any] ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _lowercase ( ):
with Pool(2 ) as pool:
        _a = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
        assert len(out ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
        _a = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
        assert len(out ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
_a = []
for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
        assert len(out ) == 4
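# The assertions above pin down map_nested's contract. As a mental model, here
# is a minimal standalone sketch of that recursion (NOT the datasets
# implementation, which additionally handles numpy arrays, progress bars and
# multiprocessing):
def _map_nested_sketch(function, data_struct):
    # recurse into dicts and lists, apply `function` only to the leaves
    if isinstance(data_struct, dict):
        return {k: _map_nested_sketch(function, v) for k, v in data_struct.items()}
    if isinstance(data_struct, (list, tuple)):
        return [_map_nested_sketch(function, v) for v in data_struct]
    return function(data_struct)

assert _map_nested_sketch(lambda x: x + 1, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}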
| 691 | 0 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , a , )
class A ( a ):
__UpperCAmelCase : Tuple = RobertaConfig
__UpperCAmelCase : Any = """roberta"""
def __init__( self , snake_case_ ) -> str:
super().__init__(snake_case_ )
_a = RobertaEmbeddings(snake_case_ )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , a , )
class A ( a ):
__UpperCAmelCase : Any = RobertaConfig
__UpperCAmelCase : int = """roberta"""
def __init__( self , snake_case_ ) -> Tuple:
super().__init__(snake_case_ )
_a = config.num_labels
_a = config.num_hidden_layers
_a = DeeRobertaModel(snake_case_ )
_a = nn.Dropout(config.hidden_dropout_prob )
_a = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(snake_case_ )
def __lowerCAmelCase ( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=-1 , snake_case_=False , ) -> Dict:
_a = self.num_layers
try:
_a = self.roberta(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , position_ids=snake_case_ , head_mask=snake_case_ , inputs_embeds=snake_case_ , )
_a = outputs[1]
_a = self.dropout(snake_case_ )
_a = self.classifier(snake_case_ )
_a = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_a = e.message
_a = e.exit_layer
_a = outputs[0]
if not self.training:
_a = entropy(snake_case_ )
_a = []
_a = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_a = MSELoss()
_a = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_a = CrossEntropyLoss()
_a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_a = []
for highway_exit in outputs[-1]:
_a = highway_exit[0]
if not self.training:
highway_logits_all.append(snake_case_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_a = MSELoss()
_a = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_a = CrossEntropyLoss()
_a = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(snake_case_ )
if train_highway:
_a = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_a = (loss,) + outputs
if not self.training:
_a = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_a = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy | 706 |
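# Illustrative inference-time sketch (a hypothetical helper, not part of the
# upstream file): the highway logits collected above are typically consumed by
# exiting at the first layer whose prediction entropy falls below a threshold.
import torch

def first_confident_exit(highway_logits_all, threshold=0.5):
    # highway_logits_all: list of per-layer logit tensors, earliest layer first
    for layer_idx, logits in enumerate(highway_logits_all):
        probs = torch.softmax(logits, dim=-1)
        layer_entropy = -(probs * torch.log(probs + 1e-12)).sum(dim=-1).mean().item()
        if layer_entropy < threshold:  # confident enough to stop here
            return layer_idx, logits
    return len(highway_logits_all) - 1, highway_logits_all[-1]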
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__snake_case : Optional[int] = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(a )
class A ( a ):
__UpperCAmelCase : Dict = """rag"""
__UpperCAmelCase : Dict = True
def __init__( self , snake_case_=None , snake_case_=True , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=" / " , snake_case_=" // " , snake_case_=5 , snake_case_=3_0_0 , snake_case_=7_6_8 , snake_case_=8 , snake_case_="wiki_dpr" , snake_case_="train" , snake_case_="compressed" , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=False , snake_case_=0.0 , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=True , snake_case_=None , **snake_case_ , ) -> Optional[Any]:
super().__init__(
bos_token_id=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , prefix=snake_case_ , vocab_size=snake_case_ , **snake_case_ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_a = kwargs.pop("question_encoder" )
_a = question_encoder_config.pop("model_type" )
_a = kwargs.pop("generator" )
_a = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = reduce_loss
_a = label_smoothing
_a = exclude_bos_score
_a = do_marginalize
_a = title_sep
_a = doc_sep
_a = n_docs
_a = max_combined_length
_a = dataset
_a = dataset_split
_a = index_name
_a = retrieval_vector_size
_a = retrieval_batch_size
_a = passages_path
_a = index_path
_a = use_dummy_dataset
_a = output_retrieved
_a = do_deduplication
_a = use_cache
if self.forced_eos_token_id is None:
_a = getattr(self.generator , "forced_eos_token_id" , snake_case_ )
@classmethod
def __lowerCAmelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = copy.deepcopy(self.__dict__ )
_a = self.question_encoder.to_dict()
_a = self.generator.to_dict()
_a = self.__class__.model_type
return output
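# Hedged usage sketch (commented out; the classmethod above corresponds to
# `RagConfig.from_question_encoder_generator_configs` in the upstream
# transformers API, and the checkpoint names are illustrative only):
# from transformers import AutoConfig, RagConfig
# question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# generator_config = AutoConfig.from_pretrained("facebook/bart-large")
# rag_config = RagConfig.from_question_encoder_generator_configs(
#     question_encoder_config, generator_config, n_docs=5, index_name="compressed"
# )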
| 691 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {"vocab_file": "spiece.model"}
__snake_case : Dict = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
}
}
__snake_case : List[Any] = {
"google/bigbird-roberta-base": 4096,
"google/bigbird-roberta-large": 4096,
"google/bigbird-base-trivia-itc": 4096,
}
class A ( a ):
__UpperCAmelCase : Dict = VOCAB_FILES_NAMES
__UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Any = ["""input_ids""", """attention_mask"""]
__UpperCAmelCase : List[int] = []
def __init__( self , snake_case_ , snake_case_="<unk>" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="<pad>" , snake_case_="[SEP]" , snake_case_="[MASK]" , snake_case_="[CLS]" , snake_case_ = None , **snake_case_ , ) -> None:
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
_a = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
_a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , sep_token=snake_case_ , mask_token=snake_case_ , cls_token=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
_a = vocab_file
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
return self.sp_model.get_piece_size()
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
_a = self.__dict__.copy()
_a = None
return state
def __setstate__( self , snake_case_ ) -> Dict:
_a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_a = {}
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
return self.sp_model.piece_to_id(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[Any]:
_a = self.sp_model.IdToPiece(snake_case_ )
return token
def __lowerCAmelCase ( self , snake_case_ ) -> List[str]:
_a = []
_a = ""
_a = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
_a = True
_a = []
else:
current_sub_tokens.append(snake_case_ )
_a = False
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = False , snake_case_ = None , snake_case_ = True , **snake_case_ , ) -> str:
_a = kwargs.pop("use_source_tokenizer" , snake_case_ )
_a = self.convert_ids_to_tokens(snake_case_ , skip_special_tokens=snake_case_ )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_a = []
_a = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    _a = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
_a = re.sub(R" (\[(MASK|SEP)\])" , R"\1" , " ".join(snake_case_ ) )
else:
_a = "".join(snake_case_ )
_a = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_a = self.clean_up_tokenization(snake_case_ )
return clean_text
else:
return text
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not os.path.isdir(snake_case_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_a = os.path.join(
snake_case_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , "wb" ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a = [self.cls_token_id]
_a = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1]
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
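# Hedged usage sketch (commented out; this class corresponds to
# `BigBirdTokenizer` upstream, and the checkpoint name mirrors
# PRETRAINED_VOCAB_FILES_MAP above):
# tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
# enc = tokenizer("BigBird handles long sequences.")
# Single sequences are wrapped as [CLS] ... [SEP] and pairs as
# [CLS] A [SEP] B [SEP], matching build_inputs_with_special_tokens above.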
| 707 |
'''simple docstring'''
class Graph :
    def __init__( self ) -> None:
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ) -> None:
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ) -> None:
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ) -> None:
        edges = self.get_edges()
        for edge in edges:
            head , tail , weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head , tail , weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ) -> str:
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F'''{head} -> {tail} == {weight}\n'''
        return string.rstrip("\n" )
    def get_edges( self ) -> list:
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self ):
        return self.adjacency.keys()
    @staticmethod
    def build( vertices=None , edges=None ) -> "Graph":
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
    class UnionFind :
        def __init__( self ) -> None:
            self.parent = {}
            self.rank = {}
        def __len__( self ) -> int:
            return len(self.parent )
        def make_set( self , item ):
            if item in self.parent:
                return self.find(item )
            self.parent[item] = item
            self.rank[item] = 0
            return item
        def find( self , item ):
            if item not in self.parent:
                return self.make_set(item )
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item] )
            return self.parent[item]
        def union( self , itema , itemb ):
            roota = self.find(itema )
            rootb = self.find(itemb )
            if roota == rootb:
                return roota
            if self.rank[roota] > self.rank[rootb]:
                self.parent[rootb] = roota
                return roota
            if self.rank[roota] < self.rank[rootb]:
                self.parent[roota] = rootb
                return rootb
            if self.rank[roota] == self.rank[rootb]:
                self.rank[roota] += 1
                self.parent[rootb] = roota
                return roota
            return None
    @staticmethod
    def boruvka_mst( graph ):
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head , tail , weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head , tail , weight = edge
                seta = union_find.find(head )
                setb = union_find.find(tail )
                if seta != setb:
                    if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
                        cheap_edge[seta] = [head, tail, weight]
                    if cheap_edge[setb] == -1 or cheap_edge[setb][2] > weight:
                        cheap_edge[setb] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head , tail , weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
return mst
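# Illustrative driver for the classes above (small weighted graph; edge
# tuples are (head, tail, weight) as consumed by add_edge; kept commented
# since this file is also imported elsewhere):
# g = Graph.build(
#     vertices=[1, 2, 3, 4],
#     edges=[(1, 2, 7), (2, 3, 3), (3, 4, 5), (4, 1, 2), (1, 3, 1)],
# )
# g.distinct_weight()  # Boruvka assumes distinct edge weights
# mst = Graph.boruvka_mst(g)
# print(mst)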
| 691 | 0 |
def solution ( numerator : int = 3, denominator : int = 7, limit : int = 1_000_000 ):
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1 ):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
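# Cross-check sketch for small limits using exact fractions (slow but
# transparently correct): the answer is the numerator of the largest reduced
# fraction strictly below numerator/denominator with denominator <= limit.
from fractions import Fraction

def brute_force(numerator: int = 3, denominator: int = 7, limit: int = 8) -> int:
    target = Fraction(numerator, denominator)
    best = Fraction(0, 1)
    for d in range(1, limit + 1):
        for n in range(1, d):
            candidate = Fraction(n, d)
            if best < candidate < target:
                best = candidate
    return best.numerator

# brute_force(3, 7, 8) == solution(3, 7, 8) == 2 (i.e. 2/5 sits just left of 3/7)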
| 708 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case : Tuple = "\\n Text data.\n Second line of data."
__snake_case : int = "file"
@pytest.fixture(scope="session" )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    _a = bytes(FILE_CONTENT, "utf-8" )
    with zstd.open(path, "wb" ) as f:
        f.write(data )
return path
@pytest.fixture
def _lowercase ( lowerCamelCase__ : int ):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH ), "w" ) as f:
        f.write(FILE_CONTENT )
return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] )
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Optional[int], lowerCamelCase__ : List[str], lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : Dict ):
_a = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
_a = input_paths[compression_format]
_a = tmp_path / "cache"
_a = DownloadConfig(cache_dir=lowerCamelCase__, extract_compressed_file=lowerCamelCase__ )
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
with open(lowerCamelCase__ ) as f:
_a = f.read()
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False] )
@pytest.mark.parametrize("default_cache_dir", [True, False] )
def _lowercase ( lowerCamelCase__ : Union[str, Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str], lowerCamelCase__ : List[str] ):
_a = "custom_cache"
_a = "custom_extracted_dir"
_a = tmp_path / "custom_extracted_path"
if default_extracted:
_a = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", lowerCamelCase__ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(lowerCamelCase__ ) )
_a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_a = xz_file
_a = (
DownloadConfig(extract_compressed_file=lowerCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=lowerCamelCase__ )
)
_a = cached_path(lowerCamelCase__, download_config=lowerCamelCase__ )
assert Path(lowerCamelCase__ ).parent.parts[-2:] == expected
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
# absolute path
_a = str(Path(lowerCamelCase__ ).resolve() )
assert cached_path(lowerCamelCase__ ) == text_file
# relative path
_a = str(Path(lowerCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowerCamelCase__ ) == text_file
def _lowercase ( lowerCamelCase__ : Dict ):
# absolute path
_a = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
# relative path
_a = "./__missing_file__.txt"
with pytest.raises(lowerCamelCase__ ):
cached_path(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(lowerCamelCase__ ) as f:
_a = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( ):
with pytest.raises(lowerCamelCase__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
http_get("https://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Union[str, Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
ftp_get("ftp://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : Optional[Any] ):
_a = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowerCamelCase__ ):
fsspec_get("s3://huggingface.co", temp_file=lowerCamelCase__ )
with pytest.raises(lowerCamelCase__ ):
fsspec_head("s3://huggingface.co" )
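# Note on the offline tests above: patching datasets.config.HF_DATASETS_OFFLINE
# to True makes each transport helper raise OfflineModeIsEnabled before any
# network I/O is attempted. A sketch of that guard (not the exact datasets
# source):
# def http_get(url, temp_file, **kwargs):
#     if config.HF_DATASETS_OFFLINE:
#         raise OfflineModeIsEnabled(f"Tried to reach {url}")
#     ...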
| 691 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( a , a , a , unittest.TestCase ):
__UpperCAmelCase : Optional[Any] = AltDiffusionPipeline
__UpperCAmelCase : List[Any] = TEXT_TO_IMAGE_PARAMS
__UpperCAmelCase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCAmelCase : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
__UpperCAmelCase : int = TEXT_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
_a = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
_a = CLIPTextModel(snake_case_ )
_a = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_a = 7_7
_a = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __lowerCAmelCase ( self , snake_case_ , snake_case_=0 ) -> Dict:
if str(snake_case_ ).startswith("mps" ):
_a = torch.manual_seed(snake_case_ )
else:
_a = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_a = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __lowerCAmelCase ( self ) -> str:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __lowerCAmelCase ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __lowerCAmelCase ( self ) -> Tuple:
_a = "cpu" # ensure determinism for the device-dependent torch.Generator
_a = self.get_dummy_components()
torch.manual_seed(0 )
_a = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
_a = RobertaSeriesModelWithTransformation(snake_case_ )
_a = text_encoder
_a = AltDiffusionPipeline(**snake_case_ )
_a = alt_pipe.to(snake_case_ )
alt_pipe.set_progress_bar_config(disable=snake_case_ )
_a = self.get_dummy_inputs(snake_case_ )
_a = "A photo of an astronaut"
_a = alt_pipe(**snake_case_ )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_a = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> List[str]:
_a = "cpu" # ensure determinism for the device-dependent torch.Generator
_a = self.get_dummy_components()
_a = PNDMScheduler(skip_prk_steps=snake_case_ )
torch.manual_seed(0 )
_a = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
_a = RobertaSeriesModelWithTransformation(snake_case_ )
_a = text_encoder
_a = AltDiffusionPipeline(**snake_case_ )
_a = alt_pipe.to(snake_case_ )
alt_pipe.set_progress_bar_config(disable=snake_case_ )
_a = self.get_dummy_inputs(snake_case_ )
_a = alt_pipe(**snake_case_ )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_a = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Tuple:
# make sure here that pndm scheduler skips prk
_a = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=snake_case_ )
_a = alt_pipe.to(snake_case_ )
alt_pipe.set_progress_bar_config(disable=snake_case_ )
_a = "A painting of a squirrel eating a burger"
_a = torch.manual_seed(0 )
_a = alt_pipe([prompt] , generator=snake_case_ , guidance_scale=6.0 , num_inference_steps=2_0 , output_type="np" )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_a = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
_a = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=snake_case_ , safety_checker=snake_case_ )
_a = alt_pipe.to(snake_case_ )
alt_pipe.set_progress_bar_config(disable=snake_case_ )
_a = "A painting of a squirrel eating a burger"
_a = torch.manual_seed(0 )
_a = alt_pipe([prompt] , generator=snake_case_ , num_inference_steps=2 , output_type="numpy" )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_a = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 709 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__snake_case : Union[str, Any] = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def _lowercase ( lowerCamelCase__ : List[Any] ):
_a = {}
state_dict.pop("pixel_mean", lowerCamelCase__ )
state_dict.pop("pixel_std", lowerCamelCase__ )
_a = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_a = key.replace(lowerCamelCase__, lowerCamelCase__ )
if re.match(lowerCamelCase__, lowerCamelCase__ ):
_a = int(re.match(lowerCamelCase__, lowerCamelCase__ ).group(2 ) )
if layer_nb == 0:
_a = key.replace("layers.0", "proj_in" )
elif layer_nb == 1:
_a = key.replace("layers.1", "layers.0" )
elif layer_nb == 2:
_a = key.replace("layers.2", "proj_out" )
_a = value
_a = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
def _lowercase ( lowerCamelCase__ : str, lowerCamelCase__ : Optional[int], lowerCamelCase__ : Tuple, lowerCamelCase__ : str="ybelkada/segment-anything" ):
_a = hf_hub_download(lowerCamelCase__, F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
_a = SamConfig()
elif "sam_vit_l" in model_name:
_a = SamVisionConfig(
hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
_a = SamConfig(
vision_config=lowerCamelCase__, )
elif "sam_vit_h" in model_name:
_a = SamVisionConfig(
hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
_a = SamConfig(
vision_config=lowerCamelCase__, )
_a = torch.load(lowerCamelCase__, map_location="cpu" )
_a = replace_keys(lowerCamelCase__ )
_a = SamImageProcessor()
_a = SamProcessor(image_processor=lowerCamelCase__ )
_a = SamModel(lowerCamelCase__ )
hf_model.load_state_dict(lowerCamelCase__ )
_a = hf_model.to("cuda" )
_a = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
_a = Image.open(requests.get(lowerCamelCase__, stream=lowerCamelCase__ ).raw ).convert("RGB" )
_a = [[[400, 650]]]
_a = [[1]]
_a = processor(images=np.array(lowerCamelCase__ ), return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
_a = processor(
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
_a = ((75, 275, 1_725, 850),)
_a = processor(images=np.array(lowerCamelCase__ ), input_boxes=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
_a = [[[400, 650], [800, 650]]]
_a = [[1, 1]]
_a = processor(
images=np.array(lowerCamelCase__ ), input_points=lowerCamelCase__, input_labels=lowerCamelCase__, return_tensors="pt" ).to("cuda" )
with torch.no_grad():
_a = hf_model(**lowerCamelCase__ )
_a = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
__snake_case : Optional[Any] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
__snake_case : str = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
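# Example invocation (illustrative; the script filename and output path are
# assumptions, not part of this file):
# python convert_sam_to_hf.py \
#     --model_name sam_vit_h_4b8939 \
#     --pytorch_dump_folder_path ./sam-vit-huge \
#     --push_to_hub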
| 691 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A ( a ):
__UpperCAmelCase : Union[str, Any] = ["""image_processor""", """tokenizer"""]
__UpperCAmelCase : Optional[int] = """CLIPImageProcessor"""
__UpperCAmelCase : Optional[int] = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self , snake_case_=None , snake_case_=None , **snake_case_ ) -> Any:
_a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , snake_case_ , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(snake_case_ , snake_case_ )
def __call__( self , snake_case_=None , snake_case_=None , snake_case_=None , **snake_case_ ) -> Optional[int]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_a = self.tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if images is not None:
_a = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if text is not None and images is not None:
_a = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> str:
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def __lowerCAmelCase ( self ) -> Dict:
_a = self.tokenizer.model_input_names
_a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
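# Hedged usage sketch (commented out; this processor corresponds to
# `AltCLIPProcessor` upstream, pairing a CLIP image processor with an
# XLM-Roberta tokenizer; the checkpoint name is illustrative):
# from PIL import Image
# processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
# inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"),
#                    return_tensors="pt", padding=True)
# list(inputs.keys())  # ["input_ids", "attention_mask", "pixel_values"]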
| 710 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Dict=0.9_99, lowerCamelCase__ : Union[str, Any]="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCamelCase__ : List[Any] ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCamelCase__ : Union[str, Any] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
_a = []
for i in range(lowerCamelCase__ ):
_a = i / num_diffusion_timesteps
_a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCamelCase__ ) / alpha_bar_fn(lowerCamelCase__ ), lowerCamelCase__ ) )
return torch.tensor(lowerCamelCase__, dtype=torch.floataa )
class A ( a , a ):
__UpperCAmelCase : int = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : Optional[int] = 2
@register_to_config
def __init__( self , snake_case_ = 1_0_0_0 , snake_case_ = 0.00_085 , snake_case_ = 0.012 , snake_case_ = "linear" , snake_case_ = None , snake_case_ = "epsilon" , snake_case_ = "linspace" , snake_case_ = 0 , ) -> Optional[int]:
if trained_betas is not None:
_a = torch.tensor(snake_case_ , dtype=torch.floataa )
elif beta_schedule == "linear":
_a = torch.linspace(snake_case_ , snake_case_ , snake_case_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a = betas_for_alpha_bar(snake_case_ )
else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_a = 1.0 - self.betas
_a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(snake_case_ , snake_case_ , snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=None ) -> Dict:
if schedule_timesteps is None:
_a = self.timesteps
_a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_a = 1 if len(snake_case_ ) > 1 else 0
else:
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
_a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowerCAmelCase ( self ) -> Dict:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
_a = self.index_for_timestep(snake_case_ )
if self.state_in_first_order:
_a = self.sigmas[step_index]
else:
_a = self.sigmas_interpol[step_index]
_a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowerCAmelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , ) -> Union[str, Any]:
_a = num_inference_steps
_a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_a = np.linspace(0 , num_train_timesteps - 1 , snake_case_ , dtype=snake_case_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(0 , snake_case_ ) * step_ratio).round()[::-1].copy().astype(snake_case_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a = (np.arange(snake_case_ , 0 , -step_ratio )).round().copy().astype(snake_case_ )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
_a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_a = torch.from_numpy(np.log(snake_case_ ) ).to(snake_case_ )
_a = np.interp(snake_case_ , np.arange(0 , len(snake_case_ ) ) , snake_case_ )
_a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_a = torch.from_numpy(snake_case_ ).to(device=snake_case_ )
# interpolate sigmas
_a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
_a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(snake_case_ ).startswith("mps" ):
# mps does not support float64
_a = torch.from_numpy(snake_case_ ).to(snake_case_ , dtype=torch.floataa )
else:
_a = torch.from_numpy(snake_case_ ).to(snake_case_ )
# interpolate timesteps
_a = self.sigma_to_t(snake_case_ ).to(snake_case_ , dtype=timesteps.dtype )
_a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_a = torch.cat([timesteps[:1], interleaved_timesteps] )
_a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_a = defaultdict(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ ) -> Optional[int]:
# get log sigma
_a = sigma.log()
# get distribution
_a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_a = low_idx + 1
_a = self.log_sigmas[low_idx]
_a = self.log_sigmas[high_idx]
# interpolate sigmas
_a = (low - log_sigma) / (low - high)
_a = w.clamp(0 , 1 )
# transform interpolation to time range
_a = (1 - w) * low_idx + w * high_idx
_a = t.view(sigma.shape )
return t
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return self.sample is None
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ) -> Union[SchedulerOutput, Tuple]:
_a = self.index_for_timestep(snake_case_ )
# advance index counter by 1
_a = timestep.cpu().item() if torch.is_tensor(snake_case_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_a = self.sigmas[step_index]
_a = self.sigmas_interpol[step_index + 1]
_a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_a = self.sigmas[step_index - 1]
_a = self.sigmas_interpol[step_index]
_a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_a = 0
_a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_a = sigma_hat if self.state_in_first_order else sigma_interpol
_a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_a = sigma_interpol - sigma_hat
# store for 2nd order step
_a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_a = sigma_next - sigma_hat
_a = self.sample
_a = None
_a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(snake_case_ ):
# mps does not support float64
_a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_a = self.timesteps.to(original_samples.device )
_a = timesteps.to(original_samples.device )
_a = [self.index_for_timestep(snake_case_ , snake_case_ ) for t in timesteps]
_a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_a = sigma.unsqueeze(-1 )
_a = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> str:
return self.config.num_train_timesteps
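# Minimal denoising-loop sketch for a KDPM2-style discrete scheduler
# (commented out; method names follow the upstream diffusers API — the
# methods above were renamed, and `unet`/`latent_shape` are placeholders):
# scheduler.set_timesteps(num_inference_steps=25, device="cuda")
# sample = torch.randn(latent_shape, device="cuda") * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     model_input = scheduler.scale_model_input(sample, t)
#     noise_pred = unet(model_input, t).sample
#     sample = scheduler.step(noise_pred, t, sample).prev_sample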
| 691 | 0 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__snake_case : Optional[int] = logging.get_logger(__name__)
__snake_case : Tuple = R"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"
class A ( a ):
@add_start_docstrings(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , **snake_case_ ) -> bool:
raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class A ( a ):
def __init__( self , snake_case_ , snake_case_ = None ) -> str:
_a = max_length
_a = max_position_embeddings
@add_start_docstrings(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , **snake_case_ ) -> bool:
_a = input_ids.shape[-1]
_a = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"This is a friendly reminder - the current text generation call will exceed the model's predefined "
F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
"exceptions, performance degradation, or nothing at all." )
return is_done
class A ( a ):
def __init__( self , snake_case_ , snake_case_ ) -> Dict:
warnings.warn(
"The class `MaxNewTokensCriteria` is deprecated. "
F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
"with `max_length = start_length + max_new_tokens` instead." , snake_case_ , )
_a = start_length
_a = max_new_tokens
_a = start_length + max_new_tokens
@add_start_docstrings(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , **snake_case_ ) -> bool:
return input_ids.shape[-1] >= self.max_length
class A ( a ):
def __init__( self , snake_case_ , snake_case_ = None ) -> Optional[int]:
_a = max_time
_a = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , **snake_case_ ) -> bool:
return time.time() - self.initial_timestamp > self.max_time
class A ( a ):
@add_start_docstrings(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , **snake_case_ ) -> bool:
return any(criteria(snake_case_ , snake_case_ ) for criteria in self )
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
for stopping_criterium in self:
if isinstance(snake_case_ , snake_case_ ):
return stopping_criterium.max_length
elif isinstance(snake_case_ , snake_case_ ):
return stopping_criterium.max_length
return None
def _lowercase ( lowerCamelCase__ : StoppingCriteriaList, lowerCamelCase__ : int ):
_a = stopping_criteria.max_length
_a = deepcopy(lowerCamelCase__ )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", lowerCamelCase__ )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=lowerCamelCase__ ) )
return new_stopping_criteria
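# Hedged usage sketch: how these criteria compose in upstream `transformers`,
# assuming the classes keep their original exported names (`StoppingCriteriaList`,
# `MaxLengthCriteria`, `MaxTimeCriteria`) rather than the anonymized `A` above.
from transformers import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList(
    [
        MaxLengthCriteria(max_length=64),  # stop once the sequence reaches 64 tokens
        MaxTimeCriteria(max_time=5.0),  # or once 5 seconds of wall time elapse
    ]
)
# StoppingCriteriaList.__call__ returns True as soon as any member criterion
# fires; it is typically passed as `model.generate(..., stopping_criteria=criteria)`.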
| 711 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : list[int], lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int, lowerCamelCase__ : list[int], lowerCamelCase__ : int ):
# Base Case
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index], lowerCamelCase__, lowerCamelCase__ ):
# Color current vertex
_a = i
# Validate coloring
if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, index + 1 ):
return True
# Backtrack
_a = -1
return False
def _lowercase ( lowerCamelCase__ : list[list[int]], lowerCamelCase__ : int ):
_a = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, 0 ):
return colored_vertices
return []
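# Hedged usage sketch: in the unobfuscated original these three functions are
# `valid_coloring`, `util_color`, and `color` (the call sites above still use
# those names), so the intended entry point is:
#
#   triangle = [[0, 1, 1],
#               [1, 0, 1],
#               [1, 1, 0]]
#   color(triangle, 2)  # -> []            (a triangle is not 2-colorable)
#   color(triangle, 3)  # -> [0, 1, 2]     (one valid 3-coloring)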
| 691 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , """Tatoeba directory does not exist.""" )
class A ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> Any:
_a = tempfile.mkdtemp()
return TatoebaConverter(save_dir=snake_case_ )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
self.resolver.convert_models(["heb-eng"] )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
_a , _a = self.resolver.write_model_card("opus-mt-he-en" , dry_run=snake_case_ )
assert mmeta["long_pair"] == "heb-eng"
| 712 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class A :
def __init__( self , snake_case_ ) -> Optional[int]:
_a = str(id_ )
_a = None
_a = None
_a = []
_a = {} # {vertex:distance}
def __lt__( self , snake_case_ ) -> Optional[Any]:
return self.key < other.key
def __repr__( self ) -> Union[str, Any]:
return self.id
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
self.neighbors.append(snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ ) -> Any:
_a = weight
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : List[Any], lowerCamelCase__ : str ):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1], lowerCamelCase__ )
graph[b - 1].add_edge(graph[a - 1], lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
_a = []
for u in graph:
_a = math.inf
_a = None
_a = 0
_a = graph[:]
while q:
_a = min(lowerCamelCase__ )
q.remove(lowerCamelCase__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_a = u
_a = u.edges[v.id]
for i in range(1, len(lowerCamelCase__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _lowercase ( lowerCamelCase__ : list, lowerCamelCase__ : Vertex ):
for u in graph:
_a = math.inf
_a = None
_a = 0
_a = list(lowerCamelCase__ )
hq.heapify(lowerCamelCase__ )
while h:
_a = hq.heappop(lowerCamelCase__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_a = u
_a = u.edges[v.id]
hq.heapify(lowerCamelCase__ )
for i in range(1, len(lowerCamelCase__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _lowercase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
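# Hedged usage sketch with the original names (`Vertex`, `connect`, `prim`,
# `prim_heap`), for which the anonymized `A`/`_lowercase` definitions above
# stand in:
#
#   graph = [Vertex(i) for i in range(1, 6)]   # vertices "1".."5"
#   connect(graph, 1, 2, 15)
#   connect(graph, 1, 3, 12)
#   connect(graph, 2, 4, 13)
#   connect(graph, 3, 5, 5)
#   prim(graph[:], graph[0])              # -> [(child, parent), ...] MST edges
#   list(prim_heap(graph[:], graph[0]))   # same edges via the heap variant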
| 691 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A :
def __init__( self , snake_case_ , snake_case_=3 , snake_case_=3_2 , snake_case_=3 , snake_case_=1_0 , snake_case_=[8, 1_6, 3_2, 6_4] , snake_case_=[1, 1, 2, 1] , snake_case_=True , snake_case_=True , snake_case_="relu" , snake_case_=3 , snake_case_=None , snake_case_=["stage2", "stage3", "stage4"] , snake_case_=[2, 3, 4] , snake_case_=1 , ) -> Any:
_a = parent
_a = batch_size
_a = image_size
_a = num_channels
_a = embeddings_size
_a = hidden_sizes
_a = depths
_a = is_training
_a = use_labels
_a = hidden_act
_a = num_labels
_a = scope
_a = len(snake_case_ )
_a = out_features
_a = out_indices
_a = num_groups
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.num_labels )
_a = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) -> str:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
_a = BitModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
_a = self.num_labels
_a = BitForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
_a = BitBackbone(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_a = None
_a = BitBackbone(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __lowerCAmelCase ( self ) -> int:
_a = self.prepare_config_and_inputs()
_a , _a , _a = config_and_inputs
_a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A ( a , a , unittest.TestCase ):
__UpperCAmelCase : Dict = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__UpperCAmelCase : List[Any] = (
{"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Any = False
def __lowerCAmelCase ( self ) -> Dict:
_a = BitModelTester(self )
_a = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) -> Optional[int]:
return
@unittest.skip(reason="Bit does not output attentions" )
def __lowerCAmelCase ( self ) -> List[str]:
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
def __lowerCAmelCase ( self ) -> Any:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(snake_case_ )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __lowerCAmelCase ( self ) -> Dict:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*snake_case_ )
def __lowerCAmelCase ( self ) -> str:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(config=snake_case_ )
for name, module in model.named_modules():
if isinstance(snake_case_ , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def __lowerCAmelCase ( self ) -> List[Any]:
def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ):
_a = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a = self.model_tester.num_stages
self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_a = layer_type
_a = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def __lowerCAmelCase ( self ) -> List[Any]:
pass
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def __lowerCAmelCase ( self ) -> List[str]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = BitModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _lowercase ( ):
_a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> List[Any]:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __lowerCAmelCase ( self ) -> Any:
_a = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case_ )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=snake_case_ , return_tensors="pt" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_a = model(**snake_case_ )
# verify the logits
_a = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_a = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
@require_torch
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[str] = (BitBackbone,) if is_torch_available() else ()
__UpperCAmelCase : int = BitConfig
__UpperCAmelCase : int = False
def __lowerCAmelCase ( self ) -> List[str]:
_a = BitModelTester(self )
| 713 |
'''simple docstring'''
__snake_case : List[str] = "Tobias Carryer"
from time import time
class A :
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=int(time() ) ) -> str: # noqa: B008
_a = multiplier
_a = increment
_a = modulo
_a = seed
def __lowerCAmelCase ( self ) -> str:
_a = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__snake_case : Union[str, Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
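# Hedged aside: the constants above are the classic Numerical Recipes LCG
# parameters a=1664525, c=1013904223, m=2**32 (written as 2 << 31). A minimal
# standalone sketch of the same recurrence x_{n+1} = (a * x_n + c) mod m:
def lcg_sketch(seed, n):
    a, c, m = 1664525, 1013904223, 2**32
    out = []
    for _ in range(n):
        seed = (a * seed + c) % m
        out.append(seed)
    return out

assert lcg_sketch(0, 2) == [1013904223, 1196435762]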
| 691 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class A ( a ):
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=True , snake_case_=9_9 , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=1_6 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> int:
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) -> List[str]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
_a = DistilBertModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , snake_case_ )
_a = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
_a = DistilBertForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
_a = DistilBertForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(
snake_case_ , attention_mask=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
_a = self.num_labels
_a = DistilBertForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
_a = self.num_labels
_a = DistilBertForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
_a = self.num_choices
_a = DistilBertForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = model(
snake_case_ , attention_mask=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.prepare_config_and_inputs()
((_a) , (_a) , (_a) , (_a) , (_a) , (_a)) = config_and_inputs
_a = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A ( a , a , unittest.TestCase ):
__UpperCAmelCase : Optional[int] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
__UpperCAmelCase : List[Any] = (
{
"""feature-extraction""": DistilBertModel,
"""fill-mask""": DistilBertForMaskedLM,
"""question-answering""": DistilBertForQuestionAnswering,
"""text-classification""": DistilBertForSequenceClassification,
"""token-classification""": DistilBertForTokenClassification,
"""zero-shot""": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : int = True
__UpperCAmelCase : Optional[int] = True
def __lowerCAmelCase ( self ) -> Any:
_a = DistilBertModelTester(self )
_a = ConfigTester(self , config_class=snake_case_ , dim=3_7 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> int:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*snake_case_ )
def __lowerCAmelCase ( self ) -> Any:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*snake_case_ )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = DistilBertModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@slow
@require_torch_gpu
def __lowerCAmelCase ( self ) -> List[str]:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_a = True
_a = model_class(config=snake_case_ )
_a = self._prepare_for_class(snake_case_ , snake_case_ )
_a = torch.jit.trace(
snake_case_ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case_ , os.path.join(snake_case_ , "traced_model.pt" ) )
_a = torch.jit.load(os.path.join(snake_case_ , "traced_model.pt" ) , map_location=snake_case_ )
loaded(inputs_dict["input_ids"].to(snake_case_ ) , inputs_dict["attention_mask"].to(snake_case_ ) )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Dict:
_a = DistilBertModel.from_pretrained("distilbert-base-uncased" )
_a = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_a = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_a = model(snake_case_ , attention_mask=snake_case_ )[0]
_a = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , snake_case_ )
_a = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1E-4 ) )
| 714 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case : List[str] = logging.get_logger("transformers.models.encodec")
__snake_case : Tuple = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
__snake_case : int = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
__snake_case : Optional[int] = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
__snake_case : Tuple = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
__snake_case : int = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
__snake_case : Union[str, Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case : List[str] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__snake_case : Tuple = []
__snake_case : Optional[int] = []
def _lowercase ( lowerCamelCase__ : Tuple, lowerCamelCase__ : Tuple, lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : List[Any] ):
for attribute in key.split("." ):
_a = getattr(lowerCamelCase__, lowerCamelCase__ )
if weight_type is not None:
_a = getattr(lowerCamelCase__, lowerCamelCase__ ).shape
else:
_a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "running_mean":
_a = value
elif weight_type == "running_var":
_a = value
elif weight_type == "num_batches_tracked":
_a = value
elif weight_type == "weight_ih_l0":
_a = value
elif weight_type == "weight_hh_l0":
_a = value
elif weight_type == "bias_ih_l0":
_a = value
elif weight_type == "bias_hh_l0":
_a = value
elif weight_type == "weight_ih_l1":
_a = value
elif weight_type == "weight_hh_l1":
_a = value
elif weight_type == "bias_ih_l1":
_a = value
elif weight_type == "bias_hh_l1":
_a = value
else:
_a = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def _lowercase ( lowerCamelCase__ : Dict, lowerCamelCase__ : str ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Any, lowerCamelCase__ : int ):
_a = []
if model_name == "encodec_24khz" or "encodec_32khz":
_a = MAPPING_24K
elif model_name == "encodec_48khz":
_a = MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__, lowerCamelCase__ ):
logger.info(F'''{name} was ignored''' )
continue
_a = False
for key, mapped_key in MAPPING.items():
if "*" in key:
_a , _a = key.split(".*." )
if prefix in name and suffix in name:
_a = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("embed" ) and name.endswith("embed_avg" ):
continue
_a = True
if "*" in mapped_key:
_a = name.split(lowerCamelCase__ )[0].split("." )[-2]
_a = mapped_key.replace("*", lowerCamelCase__ )
if "weight_g" in name:
_a = "weight_g"
elif "weight_v" in name:
_a = "weight_v"
elif "weight_ih_l0" in name:
_a = "weight_ih_l0"
elif "weight_hh_l0" in name:
_a = "weight_hh_l0"
elif "bias_ih_l0" in name:
_a = "bias_ih_l0"
elif "bias_hh_l0" in name:
_a = "bias_hh_l0"
elif "weight_ih_l1" in name:
_a = "weight_ih_l1"
elif "weight_hh_l1" in name:
_a = "weight_hh_l1"
elif "bias_ih_l1" in name:
_a = "bias_ih_l1"
elif "bias_hh_l1" in name:
_a = "bias_hh_l1"
elif "bias" in name:
_a = "bias"
elif "weight" in name:
_a = "weight"
elif "running_mean" in name:
_a = "running_mean"
elif "running_var" in name:
_a = "running_var"
elif "num_batches_tracked" in name:
_a = "num_batches_tracked"
else:
_a = None
set_recursively(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def _lowercase ( lowerCamelCase__ : List[str], lowerCamelCase__ : Dict, lowerCamelCase__ : List[Any], lowerCamelCase__ : str=None, lowerCamelCase__ : List[Any]=None, ):
if config_path is not None:
_a = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
_a = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
_a = [8, 5, 4, 4]
_a = [2.2]
_a = 64
_a = 32_000
_a = 2_048
_a = False
_a = False
_a = False
elif model_name == "encodec_48khz":
_a = [8, 5, 4, 2]
_a = [3.0, 6.0, 12.0, 24.0]
_a = 48_000
_a = 2
_a = False
_a = "time_group_norm"
_a = True
_a = 1.0
_a = 0.01
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
_a = EncodecModel(lowerCamelCase__ )
_a = EncodecFeatureExtractor(
feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
feature_extractor.save_pretrained(lowerCamelCase__ )
_a = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
_a = original_checkpoint["best_state"]
recursively_load_weights(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("Pushing to the hub..." )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__snake_case : List[Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
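# Hedged illustration of the ".*." wildcard convention used by the MAPPING_*
# dicts above ("*" stands for a layer index). A minimal sketch of the same
# prefix/suffix containment test performed in `should_ignore` and the main
# conversion loop:
def wildcard_match_sketch(pattern, name):
    prefix, suffix = pattern.split(".*.")
    return prefix in name and suffix in name

assert wildcard_match_sketch(
    "quantizer.vq.layers.*._codebook.embed", "quantizer.vq.layers.3._codebook.embed"
)
assert not wildcard_match_sketch("quantizer.vq.layers.*._codebook.embed", "encoder.model.0.conv.conv")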
| 691 | 0 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A :
@staticmethod
def __lowerCAmelCase ( *snake_case_ , **snake_case_ ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
class A ( unittest.TestCase ):
@require_torch
def __lowerCAmelCase ( self ) -> int:
_a = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
_a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_a = image_classifier(snake_case_ , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(snake_case_ ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
_a = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case_ ) , [
[
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
],
[
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
],
[
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
],
[
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
],
[
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
],
] , )
@require_tf
def __lowerCAmelCase ( self ) -> Any:
_a = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
_a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_a = image_classifier(snake_case_ , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(snake_case_ ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
_a = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case_ ) , [
[
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
],
[
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
],
[
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
],
[
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
],
[
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
{"score": 0.333, "label": ANY(snake_case_ )},
],
] , )
@slow
@require_torch
def __lowerCAmelCase ( self ) -> List[Any]:
_a = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
_a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_a = image_classifier(snake_case_ , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(snake_case_ ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
_a = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case_ ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def __lowerCAmelCase ( self ) -> List[Any]:
_a = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
_a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_a = image_classifier(snake_case_ , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(snake_case_ ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
_a = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case_ ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
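# Hedged usage sketch mirroring the slow tests above (network access required):
#
#   from transformers import pipeline
#   clf = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   clf(image, candidate_labels=["cat", "plane", "remote"])
#   # -> [{"score": 0.511, "label": "remote"}, ...] sorted by descending score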
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : int = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
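# Hedged note on the `_LazyModule` pattern above: nothing listed in
# `_import_structure` is imported at module load time; the first attribute
# access triggers the real import, e.g.
#
#   import transformers.models.bloom as bloom
#   bloom.BloomConfig  # resolved lazily on first access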
| 691 | 0 |
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def _lowercase ( *lowerCamelCase__ : str ):
with open(__file__, "r" ) as fh:
fcntl.flock(fh, fcntl.LOCK_EX )
try:
print(*lowerCamelCase__ )
finally:
fcntl.flock(fh, fcntl.LOCK_UN )
__snake_case : Dict = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
__snake_case : Dict = torch.device("cuda", local_rank)
__snake_case : str = socket.gethostname()
__snake_case : Union[str, Any] = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__snake_case : Optional[int] = dist.get_rank()
__snake_case : Optional[Any] = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
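# Hedged usage note (the command is an assumption based on standard torchrun
# practice, not taken from this file): diagnostics like the above are launched
# with one process per GPU, e.g.
#
#   torchrun --nproc_per_node=2 torch-distributed-gpu-test.py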
| 716 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=a ):
__UpperCAmelCase : int = ["""torch""", """scipy"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(self , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ["torch", "scipy"] )
| 691 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Dict = logging.get_logger(__name__)
class A ( a ):
__UpperCAmelCase : str = """encoder-decoder"""
__UpperCAmelCase : List[str] = True
def __init__( self , **snake_case_ ) -> Optional[Any]:
super().__init__(**snake_case_ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
_a = kwargs.pop("encoder" )
_a = encoder_config.pop("model_type" )
_a = kwargs.pop("decoder" )
_a = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = AutoConfig.for_model(snake_case_ , **snake_case_ )
_a = True
@classmethod
def __lowerCAmelCase ( cls , snake_case_ , snake_case_ , **snake_case_ ) -> PretrainedConfig:
logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
_a = True
_a = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **snake_case_ )
def __lowerCAmelCase ( self ) -> Tuple:
_a = copy.deepcopy(self.__dict__ )
_a = self.encoder.to_dict()
_a = self.decoder.to_dict()
_a = self.__class__.model_type
return output
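# Hedged usage sketch with the upstream class name (`EncoderDecoderConfig`)
# standing in for the anonymized `A` above:
#
#   from transformers import BertConfig, EncoderDecoderConfig
#   enc, dec = BertConfig(), BertConfig()
#   cfg = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   cfg.decoder.is_decoder, cfg.decoder.add_cross_attention  # -> (True, True)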
| 717 |
'''simple docstring'''
__snake_case : Dict = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 691 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class A ( a ):
__UpperCAmelCase : int = """Wav2Vec2FeatureExtractor"""
__UpperCAmelCase : str = """AutoTokenizer"""
def __init__( self , snake_case_ , snake_case_ ) -> Union[str, Any]:
super().__init__(snake_case_ , snake_case_ )
_a = self.feature_extractor
_a = False
@classmethod
def __lowerCAmelCase ( cls , snake_case_ , **snake_case_ ) -> List[str]:
try:
return super().from_pretrained(snake_case_ , **snake_case_ )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , snake_case_ , )
_a = WavaVecaFeatureExtractor.from_pretrained(snake_case_ , **snake_case_ )
_a = WavaVecaCTCTokenizer.from_pretrained(snake_case_ , **snake_case_ )
return cls(feature_extractor=snake_case_ , tokenizer=snake_case_ )
def __call__( self , *snake_case_ , **snake_case_ ) -> List[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*snake_case_ , **snake_case_ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
_a = kwargs.pop("raw_speech" )
else:
_a = kwargs.pop("audio" , snake_case_ )
_a = kwargs.pop("sampling_rate" , snake_case_ )
_a = kwargs.pop("text" , snake_case_ )
if len(snake_case_ ) > 0:
_a = args[0]
_a = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
_a = self.feature_extractor(snake_case_ , *snake_case_ , sampling_rate=snake_case_ , **snake_case_ )
if text is not None:
_a = self.tokenizer(snake_case_ , **snake_case_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_a = encodings["input_ids"]
return inputs
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> List[str]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*snake_case_ , **snake_case_ )
_a = kwargs.pop("input_features" , snake_case_ )
_a = kwargs.pop("labels" , snake_case_ )
if len(snake_case_ ) > 0:
_a = args[0]
_a = args[1:]
if input_features is not None:
_a = self.feature_extractor.pad(snake_case_ , *snake_case_ , **snake_case_ )
if labels is not None:
_a = self.tokenizer.pad(snake_case_ , **snake_case_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_a = labels["input_ids"]
return input_features
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> str:
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def __lowerCAmelCase ( self , *snake_case_ , **snake_case_ ) -> Tuple:
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@contextmanager
def __lowerCAmelCase ( self ) -> Dict:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
_a = True
_a = self.tokenizer
yield
_a = self.feature_extractor
_a = False
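# Hedged usage sketch with the upstream name (`Wav2Vec2Processor`) standing in
# for the anonymized class above; passing `audio` and `text` in a single call
# yields both `input_values` and `labels`, as implemented in `__call__`:
#
#   import numpy as np
#   from transformers import Wav2Vec2Processor
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   batch = processor(audio=np.zeros(16000), sampling_rate=16000, text="HELLO",
#                     return_tensors="pt")
#   batch["input_values"].shape, batch["labels"].shape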
| 718 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = ProphetNetTokenizer
__UpperCAmelCase : Optional[Any] = False
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
_a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __lowerCAmelCase ( self , snake_case_ ) -> Any:
_a = "UNwant\u00E9d,running"
_a = "unwanted, running"
return input_text, output_text
def __lowerCAmelCase ( self ) -> Any:
_a = self.tokenizer_class(self.vocab_file )
_a = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> int:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Tuple:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_a = {}
for i, token in enumerate(snake_case_ ):
_a = i
_a = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_a = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
_a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
_a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
text = tokenizer.encode("sequence builders" , add_special_tokens=False )
text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 691 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
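# Public API map consumed by _LazyModule: submodule name -> exported symbols.
# The heavy imports are deferred until an attribute is first accessed.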
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 719 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine" )
    parser.add_argument("--num_epochs", type=int, default=5 )
    parser.add_argument("--batch_size", type=int, default=6 )
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1 )
    parser.add_argument("--freeze", type=bool, default=True )  # assumed default: freeze the encoder unless overridden
    parser.add_argument("--learning_rate", type=float, default=5e-4 )
    parser.add_argument("--seed", type=int, default=0 )
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine" )
    parser.add_argument("--num_warmup_steps", type=int, default=10 )
    parser.add_argument("--weight_decay", type=float, default=0.01 )
    parser.add_argument("--output_dir", type=str, default="./results" )
    return parser.parse_args()
__snake_case : str = load("accuracy")
def _lowercase ( lowerCamelCase__ : List[str] ):
_a , _a = eval_pred
_a = np.argmax(lowerCamelCase__, axis=1 )
return metric.compute(predictions=lowerCamelCase__, references=lowerCamelCase__ )
class CustomCallback(TrainerCallback):
    """Re-runs evaluation on the training set whenever an evaluation is scheduled,
    so training accuracy can be tracked alongside validation accuracy."""
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer
    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed )
    dataset = load_dataset("codeparrot/codecomplex", split="train" )
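    # 80/10/10 split: hold out 20% of the data, then divide it evenly into test and validation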
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test["test"].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id
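    # Optionally freeze the pretrained encoder so only the classification head is trained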
if args.freeze:
for param in model.roberta.parameters():
            param.requires_grad = False
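    # Map the string complexity labels onto integer class ids for the 7-way classification head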
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1_024 )
        label = labels.str2int(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation["train"].column_names, )
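    # Pad each batch dynamically to its longest sequence rather than padding everything to max_length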
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )
    training_args = TrainingArguments(
output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
print("Training..." )
    trainer.add_callback(CustomCallback(trainer ) )
trainer.train()
if __name__ == "__main__":
main()
| 691 | 0 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` set to 1, e.g. set_bit(0b1101, 1) -> 0b1111 (15)."""
    return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` cleared to 0, e.g. clear_bit(0b10010, 1) -> 0b10000 (16)."""
    return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` inverted, e.g. flip_bit(0b101, 1) -> 0b111 (7)."""
    return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1, e.g. is_bit_set(0b1010, 1) -> True."""
    return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
    """Return the bit value (0 or 1) at `position` of `number`, e.g. get_bit(0b1010, 0) -> 0."""
    return int((number & (1 << position)) != 0 )
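# Quick illustrative round trip combining the helpers (arbitrary values):
# set_bit(0, 3) -> 8, flip_bit(8, 3) -> 0, get_bit(8, 3) -> 1, is_bit_set(8, 3) -> True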
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
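    # One sample sentence per language, interpolated into the generated "How to use" snippet below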
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
    pair = f'''{src_lang}-{tgt_lang}'''
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the one reported in the paper, because the original authors don't use `sacrebleu` and score tokenized outputs, whereas the `transformers` score was measured with `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True )
    path = os.path.join(model_card_dir, "README.md" )
    print(F'''Generating {path}''' )
    with open(path, "w", encoding="utf-8" ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 691 | 0 |