from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two points given in degrees,
    using the WGS84 axes above to correct the latitudes for flattening."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Haversine equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
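
# Illustrative check, added here as an example and not part of the original module.
# The coordinates below are assumed values for San Francisco and Yosemite Valley,
# and the ~254 km expectation is approximate rather than a reference result.
def _demo_haversine_distance() -> None:
    san_francisco = (37.774856, -122.424227)
    yosemite = (37.864742, -119.537521)
    metres = haversine_distance(*san_francisco, *yosemite)
    assert 250_000 < metres < 260_000  # roughly 254 km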
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the Gaussian function to each element of the matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Create a Gaussian kernel of the given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
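
# Minimal sketch (an addition, not part of the original script) of running the filter
# on a synthetic image instead of a file; the array size and variances are arbitrary
# assumptions for illustration.
def _demo_bilateral_filter() -> None:
    rng = np.random.default_rng(0)
    noisy = rng.random((32, 32)).astype("float32")
    smoothed = bilateral_filter(noisy, spatial_variance=1.0, intensity_variance=1.0, kernel_size=5)
    assert smoothed.shape == noisy.shape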
def solution(limit: int = 28123) -> int:
    """Project Euler 23: return the sum of all positive integers that cannot be
    written as the sum of two abundant numbers (every integer above 28123 can)."""
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
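
# Sanity check added for illustration. It rests on the assumption that 24 (= 12 + 12)
# is the smallest sum of two abundant numbers, so every integer below 24 is counted.
def _demo_solution() -> None:
    assert solution(23) == sum(range(1, 24))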
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']]  # noqa: E231
        expected_boxes = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """Return True if the string is a valid dotted-quad IPv4 address, i.e. four
    numeric octets each in the range 0-255."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
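
# Illustrative checks added as an example; the input values are assumptions,
# not test data from the original module.
def _demo_ip_validation() -> None:
    assert is_ip_v4_address_valid("192.168.0.23")
    assert not is_ip_v4_address_valid("192.256.15.8")   # octet out of range
    assert not is_ip_v4_address_valid("172.100.0.8.4")  # too many octets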
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
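
# Minimal sketch of instantiating the config above; the overridden values are
# arbitrary assumptions for illustration, not recommended settings.
def _demo_umt5_config() -> None:
    config = UMT5Config(d_model=256, num_layers=4, num_heads=4)
    assert config.hidden_size == 256        # alias property defined above
    assert config.num_decoder_layers == 4   # defaults to num_layers (symmetry)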
import pytest

import datasets


# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType


logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between the question-answering/sequence-classification
        # heads and the other tasks
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
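
# Minimal sketch of the config defaults; the assertions simply restate values
# set in __init__ above and are included for illustration only.
def _demo_layoutlmv3_config() -> None:
    config = LayoutLMv3Config()
    assert config.max_2d_position_embeddings == 1024
    assert config.input_size == 224 and config.patch_size == 16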
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by adding special tokens: `<s> X </s>` for a single
        sequence, `<s> A </s></s> B </s>` for a pair of sequences."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
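
# Minimal usage sketch, added for illustration. It assumes network access to
# download the pretrained files; "moussaKam/barthez" is one of the checkpoints
# listed in the map above, and the French sample sentence is arbitrary.
def _demo_barthez_tokenizer() -> None:
    tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
    ids = tokenizer("Bonjour le monde")["input_ids"]
    # single sequences are wrapped as <s> ... </s> by build_inputs_with_special_tokens
    assert ids[0] == tokenizer.cls_token_id and ids[-1] == tokenizer.sep_token_id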
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return a grayscale image computed from an RGB image."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Return a binary image computed from a grayscale image."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Return the morphological dilation of a binary image by the given kernel."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])

    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
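
# Tiny end-to-end check of dilation added for illustration; the synthetic image
# and the expected plus-shaped growth are assumptions based on the 3x3 kernel.
def _demo_dilation() -> None:
    image = np.zeros((5, 5))
    image[2, 2] = 1  # single foreground pixel
    kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    dilated = dilation(image, kernel)
    assert dilated.sum() == 5  # the pixel grows into a plus-shaped neighbourhood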
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of the given magnitude and angle into its x and y
    components; the angle is in degrees unless radian_mode is True."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check whether a system of forces applied at the given locations has a
    net moment of approximately zero."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])

    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to the (height, width) given in `size`."""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale the pixel values of an image by `scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
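
# Minimal sketch, added for illustration, of running the processor on a synthetic
# image; the input shape is an arbitrary assumption and Pillow must be installed.
def _demo_blip_image_processor() -> None:
    image_processor = BlipImageProcessor()
    image = np.zeros((64, 64, 3), dtype=np.uint8)
    pixel_values = image_processor(image, return_tensors="np").pixel_values
    assert pixel_values.shape == (1, 3, 384, 384)  # resized to the default size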
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}

    training_function(config, args)


if __name__ == "__main__":
    main()
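
# Typical invocation, shown for illustration; the flag values are assumptions, and
# the script is normally run through the `accelerate launch` CLI so that distributed
# and DeepSpeed state is set up before `main()` runs:
#
#   accelerate launch this_script.py --num_epochs 2 --output_dir ./checkpoints
#   accelerate launch this_script.py --resume_from_checkpoint ./checkpoints/epoch_0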
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
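    # Sketch of the intended effect (assuming the usual transformers layout): with
    # the lazy module installed, `from transformers.models.bert import BertModel`
    # resolves the attribute on first access instead of importing torch, TensorFlow
    # and Flax eagerly at package import time.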
| 688 | 0 |
import os
import pytest
from attr import dataclass
SCREAMING_SNAKE_CASE__ = """us-east-1""" # defaults region
@dataclass
class A__ :
lowerCAmelCase__ : str
lowerCAmelCase__ : Optional[Any] = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
lowerCAmelCase__ : Union[str, Any] = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 500,
"save_steps": 5500,
}
lowerCAmelCase__ : Any = {**hyperparameters, "max_steps": 1000}
@property
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
return f"""{self.framework}-transfromers-test"""
@property
def a__ ( self : List[Any] ) -> str:
"""simple docstring"""
return f"""./tests/sagemaker/scripts/{self.framework}"""
@property
def a__ ( self : Dict ) -> str:
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]:
__lowercase = SageMakerTestEnvironment(framework=request.cls.framework )
| 704 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ) -> Any:
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
__lowercase = TapasConfig.from_json_file(SCREAMING_SNAKE_CASE )
# set absolute/relative position embeddings parameter
__lowercase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WTQ":
# run_task_main.py hparams
__lowercase = 4
__lowercase = True
# hparam_utils.py hparams
__lowercase = 0.664_694
__lowercase = 0.207_951
__lowercase = 0.121_194
__lowercase = True
__lowercase = True
__lowercase = False
__lowercase = 0.0_352_513
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__lowercase = 4
__lowercase = False
# hparam_utils.py hparams
__lowercase = 36.4_519
__lowercase = 0.903_421
__lowercase = 222.088
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = 0.763_141
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "TABFACT":
__lowercase = TapasForSequenceClassification(config=SCREAMING_SNAKE_CASE )
elif task == "MLM":
__lowercase = TapasForMaskedLM(config=SCREAMING_SNAKE_CASE )
elif task == "INTERMEDIATE_PRETRAINING":
__lowercase = TapasModel(config=SCREAMING_SNAKE_CASE )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
__lowercase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
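    # Hypothetical invocation (script name and all paths are placeholders):
    #   python convert_tapas_checkpoint.py \
    #       --task WTQ \
    #       --reset_position_index_per_cell \
    #       --tf_checkpoint_path /path/to/model.ckpt \
    #       --tapas_config_file /path/to/tapas_config.json \
    #       --pytorch_dump_path ./tapas-wtq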
| 688 | 0 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> np.ndarray:
# prepare kernel
    # the kernel size has to be odd
if (ksize % 2) == 0:
__lowercase = ksize + 1
__lowercase = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(SCREAMING_SNAKE_CASE ):
for x in range(SCREAMING_SNAKE_CASE ):
# distance from center
__lowercase = x - ksize // 2
__lowercase = y - ksize // 2
# degree to radiant
__lowercase = theta / 180 * np.pi
__lowercase = np.cos(_theta )
__lowercase = np.sin(_theta )
# get kernel x
__lowercase = cos_theta * px + sin_theta * py
# get kernel y
__lowercase = -sin_theta * px + cos_theta * py
# fill kernel
__lowercase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
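# Note: the body above evaluates the real-valued Gabor function
#   g(x', y') = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
# with (x', y') the pixel coordinates rotated by theta. Judging from the free
# names used in the body, the intended positional order of the obfuscated
# parameters is (ksize, sigma, theta, lambd, psi, gamma).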
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
SCREAMING_SNAKE_CASE__ = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
SCREAMING_SNAKE_CASE__ = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
SCREAMING_SNAKE_CASE__ = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
SCREAMING_SNAKE_CASE__ = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
SCREAMING_SNAKE_CASE__ = out / out.max() * 255
SCREAMING_SNAKE_CASE__ = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 705 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> int:
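    # OR via tuple counting: the intended reading is that the gate outputs 1
    # exactly when at least one of the two inputs equals 1, i.e. when
    # (input_a, input_b).count(1) != 0 in the un-obfuscated original.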
return int((input_a, input_a).count(1 ) != 0 )
def __SCREAMING_SNAKE_CASE ( ) -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 688 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=_UpperCAmelCase ).to(_UpperCAmelCase )
__lowercase = AutoTokenizer.from_pretrained('google/mt5-small' )
__lowercase = tokenizer('Hello there' , return_tensors='pt' ).input_ids
__lowercase = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
__lowercase = model(input_ids.to(_UpperCAmelCase ) , labels=labels.to(_UpperCAmelCase ) ).loss
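        # model(...).loss is the mean cross-entropy per token; scaling by the label
        # length and negating recovers the summed log-likelihood that the reference
        # Mesh-TensorFlow score below is expressed in.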
__lowercase = -(labels.shape[-1] * loss.item())
__lowercase = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 706 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """Hello, World!"""
SCREAMING_SNAKE_CASE__ = """en_XX"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool ) -> Optional[int]:
__lowercase = Path('data_bin' )
__lowercase = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(SCREAMING_SNAKE_CASE ).parent ) ,
        checkpoint_file=Path(SCREAMING_SNAKE_CASE ).name ,
        _name='xmod_base' ,
        arch='xmod_base' ,
        task='multilingual_masked_lm' ,
        data_name_or_path=str(SCREAMING_SNAKE_CASE ) ,
        bpe='sentencepiece' ,
        sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE ).parent / 'sentencepiece.bpe.model' ) ,
        src_dict=str(data_dir / 'dict.txt' ) ,
    )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE )
__lowercase = xmod.model.encoder.sentence_encoder
__lowercase = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings ,
        hidden_size=xmod.cfg.model.encoder_embed_dim ,
        num_hidden_layers=xmod.cfg.model.encoder_layers ,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads ,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim ,
        max_position_embeddings=514 ,
        type_vocab_size=1 ,
        layer_norm_eps=1E-5 ,
        pre_norm=xmod.cfg.model.encoder_normalize_before ,
        adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) ,
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm ,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm ,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter ,
        languages=xmod.cfg.model.languages ,
    )
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , SCREAMING_SNAKE_CASE )
__lowercase = XmodForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowercase = xmod_sent_encoder.embed_tokens.weight
__lowercase = xmod_sent_encoder.embed_positions.weight
__lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowercase = xmod_sent_encoder.layernorm_embedding.weight
__lowercase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowercase = model.roberta.encoder.layer[i]
__lowercase = xmod_sent_encoder.layers[i]
# self attention
__lowercase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__lowercase = xmod_layer.self_attn.q_proj.weight
__lowercase = xmod_layer.self_attn.q_proj.bias
__lowercase = xmod_layer.self_attn.k_proj.weight
__lowercase = xmod_layer.self_attn.k_proj.bias
__lowercase = xmod_layer.self_attn.v_proj.weight
__lowercase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowercase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
__lowercase = xmod_layer.self_attn.out_proj.weight
__lowercase = xmod_layer.self_attn.out_proj.bias
__lowercase = xmod_layer.self_attn_layer_norm.weight
__lowercase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowercase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
# output
__lowercase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
__lowercase = xmod_layer.final_layer_norm.weight
__lowercase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowercase = xmod_layer.adapter_layer_norm.weight
__lowercase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowercase = bert_output.adapter_modules[lang_code]
__lowercase = xmod_layer.adapter_modules[lang_code]
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowercase = xmod_sent_encoder.layer_norm.weight
__lowercase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].dense.weight
__lowercase = xmod.model.classification_heads['mnli'].dense.bias
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight
__lowercase = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowercase = xmod.model.encoder.lm_head.dense.weight
__lowercase = xmod.model.encoder.lm_head.dense.bias
__lowercase = xmod.model.encoder.lm_head.layer_norm.weight
__lowercase = xmod.model.encoder.lm_head.layer_norm.bias
__lowercase = xmod.model.encoder.lm_head.weight
__lowercase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowercase = xmod.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE )
__lowercase = model(SCREAMING_SNAKE_CASE )[0]
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'](xmod.extract_features(SCREAMING_SNAKE_CASE ) )
else:
__lowercase = xmod.model(SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__lowercase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
__lowercase = torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
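    # Hypothetical invocation (script name and paths are placeholders):
    #   python convert_xmod_checkpoint.py \
    #       --xmod_checkpoint_path /path/to/xmod.base/model.pt \
    #       --pytorch_dump_folder_path ./xmod-base \
    #       --classification_head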
| 688 | 0 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
SCREAMING_SNAKE_CASE__ = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
SCREAMING_SNAKE_CASE__ = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
SCREAMING_SNAKE_CASE__ = reader.read()
SCREAMING_SNAKE_CASE__ = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
SCREAMING_SNAKE_CASE__ = UNetaDModel(**config)
else:
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
SCREAMING_SNAKE_CASE__ = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
SCREAMING_SNAKE_CASE__ = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
SCREAMING_SNAKE_CASE__ = config[key]
del config[key]
SCREAMING_SNAKE_CASE__ = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
SCREAMING_SNAKE_CASE__ = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
SCREAMING_SNAKE_CASE__ = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
SCREAMING_SNAKE_CASE__ = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
SCREAMING_SNAKE_CASE__ = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
SCREAMING_SNAKE_CASE__ = param_value
SCREAMING_SNAKE_CASE__ = True
if not has_changed:
SCREAMING_SNAKE_CASE__ = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 707 |
from __future__ import annotations
import math
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int ) -> float:
__lowercase = u
for i in range(1 , SCREAMING_SNAKE_CASE ):
__lowercase = temp * (u - i)
return temp
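# ucal(u, p) returns the falling product u * (u - 1) * ... * (u - p + 1); divided
# by p! (as done in main below) it is the coefficient of the p-th forward
# difference in Newton's forward interpolation formula.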
def __SCREAMING_SNAKE_CASE ( ) -> None:
__lowercase = int(input('enter the numbers of values: ' ) )
__lowercase = []
for _ in range(SCREAMING_SNAKE_CASE ):
y.append([] )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE ):
y[i].append(SCREAMING_SNAKE_CASE )
__lowercase = 0
print('enter the values of parameters in a list: ' )
__lowercase = list(map(SCREAMING_SNAKE_CASE , input().split() ) )
print('enter the values of corresponding parameters: ' )
for i in range(SCREAMING_SNAKE_CASE ):
__lowercase = float(input() )
__lowercase = int(input('enter the value to interpolate: ' ) )
__lowercase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , SCREAMING_SNAKE_CASE ):
for j in range(n - i ):
__lowercase = y[j + 1][i - 1] - y[j][i - 1]
__lowercase = y[0][0]
for i in range(1 , SCREAMING_SNAKE_CASE ):
summ += (ucal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) * y[0][i]) / math.factorial(SCREAMING_SNAKE_CASE )
print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
| 688 | 0 |
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
SCREAMING_SNAKE_CASE__ = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
SCREAMING_SNAKE_CASE__ = """main"""
# Default branch name
SCREAMING_SNAKE_CASE__ = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
SCREAMING_SNAKE_CASE__ = """aaaaaaa"""
# This commit does not exist, so we should 404.
SCREAMING_SNAKE_CASE__ = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
SCREAMING_SNAKE_CASE__ = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def __SCREAMING_SNAKE_CASE ( ) -> str:
print('Bonjour!' )
yield
print('Au revoir!' )
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[int] ) -> int:
"""simple docstring"""
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class A__ ( unittest.TestCase ):
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def a__ ( self : str , _UpperCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def a__ ( self : Any , _UpperCAmelCase : Any ) -> Optional[Any]:
"""simple docstring"""
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def a__ ( self : List[Any] , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(find_labels(_UpperCAmelCase ) , ['labels'] )
self.assertEqual(find_labels(_UpperCAmelCase ) , ['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(_UpperCAmelCase ) , ['start_positions', 'end_positions'] )
class A__ ( lowerCAmelCase__ ):
pass
self.assertEqual(find_labels(_UpperCAmelCase ) , ['labels'] )
@require_tf
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
self.assertEqual(find_labels(_UpperCAmelCase ) , ['labels'] )
self.assertEqual(find_labels(_UpperCAmelCase ) , ['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(_UpperCAmelCase ) , ['start_positions', 'end_positions'] )
class A__ ( lowerCAmelCase__ ):
pass
self.assertEqual(find_labels(_UpperCAmelCase ) , ['labels'] )
@require_flax
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
self.assertEqual(find_labels(_UpperCAmelCase ) , [] )
self.assertEqual(find_labels(_UpperCAmelCase ) , [] )
self.assertEqual(find_labels(_UpperCAmelCase ) , [] )
class A__ ( lowerCAmelCase__ ):
pass
self.assertEqual(find_labels(_UpperCAmelCase ) , [] )
| 708 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowercase = F"""Input value of [number={number}] must be an integer"""
raise TypeError(SCREAMING_SNAKE_CASE )
if number < 1:
__lowercase = F"""Input value of [number={number}] must be > 0"""
raise ValueError(SCREAMING_SNAKE_CASE )
__lowercase = 1
for i in range(1 , SCREAMING_SNAKE_CASE ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
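    # Illustrative check (calls the obfuscated helper defined above): the loop in
    # the function applies the Catalan recurrence C(i) = C(i - 1) * (4i - 2) // (i + 1).
    for n, expected in [(1, 1), (2, 1), (3, 2), (4, 5), (5, 14), (6, 42)]:
        assert __SCREAMING_SNAKE_CASE(n) == expected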
| 688 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 709 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
__lowercase = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
__lowercase = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE )
# Let's go
__lowercase = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE , 'func' ):
parser.print_help()
exit(1 )
# Run
__lowercase = args.func(SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
| 688 | 0 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : bytes ) -> str:
return "".join([hex(SCREAMING_SNAKE_CASE )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE )] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(SCREAMING_SNAKE_CASE ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
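    # Quick decode check (the second function definition above shadows the first,
    # so only the decoder is reachable under the shared obfuscated name):
    assert __SCREAMING_SNAKE_CASE('48454C4C4F') == b'HELLO'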
| 710 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[str] = ProphetNetTokenizer
lowerCAmelCase__ : str = False
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : str , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = 'UNwant\u00E9d,running'
__lowercase = 'unwanted, running'
return input_text, output_text
def a__ ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__lowercase = {}
for i, token in enumerate(_UpperCAmelCase ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowercase = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
__lowercase = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def a__ ( self : int ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
| 688 | 0 |
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int = 4000000 ) -> int:
__lowercase = []
__lowercase , __lowercase = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(SCREAMING_SNAKE_CASE )
__lowercase , __lowercase = b, a + b
return sum(SCREAMING_SNAKE_CASE )
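# Worked example for the default bound: the even Fibonacci numbers not exceeding
# 4,000,000 are 2, 8, 34, 144, 610, 2584, 10946, 46368, 196418, 832040, 3524578,
# and their sum is 4613732 (Project Euler problem 2).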
if __name__ == "__main__":
print(F'''{solution() = }''')
| 711 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""jukebox""": 512,
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
lowerCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Optional[Any] = PRETRAINED_LYRIC_TOKENS_SIZES
lowerCAmelCase__ : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=["v3", "v2", "v2"] , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Union[str, Any]="<|endoftext|>" , **_UpperCAmelCase : Tuple , ) -> List[Any]:
"""simple docstring"""
__lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
super().__init__(
unk_token=_UpperCAmelCase , n_genres=_UpperCAmelCase , version=_UpperCAmelCase , max_n_lyric_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = version
__lowercase = max_n_lyric_tokens
__lowercase = n_genres
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
__lowercase = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
__lowercase = oov.replace(R'\-\'' , R'\-+\'' )
__lowercase = regex.compile(_UpperCAmelCase )
__lowercase = {v: k for k, v in self.artists_encoder.items()}
__lowercase = {v: k for k, v in self.genres_encoder.items()}
__lowercase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        return dict(self.artists_encoder , **self.genres_encoder , **self.lyrics_encoder )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = [self.artists_encoder.get(_UpperCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(_UpperCAmelCase ) ):
__lowercase = [self.genres_encoder.get(_UpperCAmelCase , 0 ) for genre in list_genres[genres]]
__lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__lowercase = [[self.lyrics_encoder.get(_UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def a__ ( self : str , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
return list(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = self.prepare_for_tokenization(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = self._tokenize(_UpperCAmelCase )
return artist, genre, lyrics
def a__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__lowercase = artists[idx].lower()
__lowercase = [genres[idx].lower()]
else:
__lowercase = self._normalize(artists[idx] ) + '.v2'
__lowercase = [
self._normalize(_UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
__lowercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
__lowercase = {vocab[index]: index + 1 for index in range(len(_UpperCAmelCase ) )}
__lowercase = 0
__lowercase = len(_UpperCAmelCase ) + 1
__lowercase = self.vocab
__lowercase = {v: k for k, v in self.vocab.items()}
__lowercase = ''
else:
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
__lowercase = self._run_strip_accents(_UpperCAmelCase )
__lowercase = lyrics.replace('\\' , '\n' )
__lowercase = self.out_of_vocab.sub('' , _UpperCAmelCase ), [], []
return artists, genres, lyrics
def a__ ( self : Tuple , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = unicodedata.normalize('NFD' , _UpperCAmelCase )
__lowercase = []
for char in text:
__lowercase = unicodedata.category(_UpperCAmelCase )
if cat == "Mn":
continue
output.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : str ) -> str:
"""simple docstring"""
__lowercase = (
[chr(_UpperCAmelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
__lowercase = frozenset(_UpperCAmelCase )
__lowercase = re.compile(R'_+' )
__lowercase = ''.join([c if c in accepted else '_' for c in text.lower()] )
__lowercase = pattern.sub('_' , _UpperCAmelCase ).strip('_' )
return text
def a__ ( self : List[str] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return " ".join(_UpperCAmelCase )
def a__ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = False ) -> int:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = TensorType(_UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
__lowercase = tf.constant
__lowercase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
__lowercase = torch.tensor
__lowercase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
__lowercase = jnp.array
__lowercase = _is_jax
else:
__lowercase = np.asarray
__lowercase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__lowercase = [inputs]
if not is_tensor(_UpperCAmelCase ):
__lowercase = as_tensor(_UpperCAmelCase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int="" , _UpperCAmelCase : Tuple="pt" ) -> BatchEncoding:
"""simple docstring"""
__lowercase = [0, 0, 0]
__lowercase = [artist] * len(self.version )
__lowercase = [genres] * len(self.version )
__lowercase , __lowercase , __lowercase = self.tokenize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase , __lowercase , __lowercase = self._convert_token_to_id(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = [-INFINITY] * len(full_tokens[-1] )
__lowercase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def a__ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_UpperCAmelCase ) )
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_UpperCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def a__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.artists_decoder.get(_UpperCAmelCase )
__lowercase = [self.genres_decoder.get(_UpperCAmelCase ) for genre in genres_index]
__lowercase = [self.lyrics_decoder.get(_UpperCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
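# Minimal usage sketch (hypothetical local vocab files; `A__` is the obfuscated
# tokenizer class defined above, with vocab files in artists/genres/lyrics order):
#   tokenizer = A__('artists.json', 'genres.json', 'lyrics.json')
#   batch = tokenizer('Alan Jackson', 'Country Rock', 'Remember when ...')
#   # -> BatchEncoding with 'input_ids' and 'attention_masks', one tensor per version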
| 688 | 0 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
# Construct model
if gpta_config_file == "":
__lowercase = GPTaConfig()
else:
__lowercase = GPTaConfig.from_json_file(SCREAMING_SNAKE_CASE )
__lowercase = GPTaModel(SCREAMING_SNAKE_CASE )
# Load weights from numpy
load_tf_weights_in_gpta(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model
__lowercase = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
__lowercase = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
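    # Note: an empty --gpt2_config_file (the default) makes the converter fall back
    # to a stock GPTaConfig, so only --gpt2_checkpoint_path and
    # --pytorch_dump_folder_path are strictly required.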
| 712 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : List[Any]=36 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Any=6 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Any=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = embedding_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_hidden_groups
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = AlbertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
__lowercase = AlbertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def a__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AlbertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = AlbertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = AlbertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Optional[Any] = True
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int=False ) -> Tuple:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = AlbertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def a__ ( self : int ) -> Any:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AlbertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = AlbertModel.from_pretrained('albert-base-v2' )
__lowercase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
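# Note added for clarity (an editorial aside, not part of the original test):
# the 3x3 slice comparison with atol=1e-4 checks a small window of hidden
# states against reference values, tolerating minor numerical drift across
# framework versions rather than requiring exact equality.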
| 688 | 0 |
import colorsys
from PIL import Image # type: ignore
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int ) -> float:
__lowercase = x
__lowercase = y
for step in range(SCREAMING_SNAKE_CASE ): # noqa: B007
__lowercase = a * a - b * b + x
__lowercase = 2 * a * b + y
__lowercase = a_new
        # divergence happens for every complex number whose absolute
        # value exceeds 2 (tested below as a * a + b * b > 4)
if a * a + b * b > 4:
break
return step / (max_step - 1)
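# Note added for clarity (not in the original file): this is the normalized
# escape-time algorithm. A point that never diverges within max_step
# iterations returns (max_step - 1) / (max_step - 1) == 1.0 ("inside" the
# set), while a point that diverges immediately (step 0) returns 0.0.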
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(SCREAMING_SNAKE_CASE , 1 , 1 ) )
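# Worked example added for clarity (not in the original file): a distance of
# 0.5 maps to hue 0.5, and colorsys.hsv_to_rgb(0.5, 1, 1) == (0.0, 1.0, 1.0),
# which the rounding above turns into the RGB triple (0, 255, 255), i.e. cyan.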
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int = 800 , SCREAMING_SNAKE_CASE : int = 600 , SCREAMING_SNAKE_CASE : float = -0.6 , SCREAMING_SNAKE_CASE : float = 0 , SCREAMING_SNAKE_CASE : float = 3.2 , SCREAMING_SNAKE_CASE : int = 50 , SCREAMING_SNAKE_CASE : bool = True , ) -> Image.Image:
__lowercase = Image.new('RGB' , (image_width, image_height) )
__lowercase = img.load()
# loop through the image-coordinates
for image_x in range(SCREAMING_SNAKE_CASE ):
for image_y in range(SCREAMING_SNAKE_CASE ):
# determine the figure-coordinates based on the image-coordinates
__lowercase = figure_width / image_width * image_height
__lowercase = figure_center_x + (image_x / image_width - 0.5) * figure_width
__lowercase = figure_center_y + (image_y / image_height - 0.5) * figure_height
__lowercase = get_distance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
__lowercase = get_color_coded_rgb(SCREAMING_SNAKE_CASE )
else:
__lowercase = get_black_and_white_rgb(SCREAMING_SNAKE_CASE )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
SCREAMING_SNAKE_CASE__ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 713 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A__ ( lowerCAmelCase__ ):
def __init__( self : List[str] , _UpperCAmelCase : str = "▁" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[str, AddedToken] = "<unk>" , _UpperCAmelCase : Union[str, AddedToken] = "</s>" , _UpperCAmelCase : Union[str, AddedToken] = "<pad>" , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['token']
__lowercase = Tokenizer(Unigram() )
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ),
pre_tokenizers.Digits(individual_digits=_UpperCAmelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__lowercase = TemplateProcessing(
single=f"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
__lowercase = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> str:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [files]
self._tokenizer.train(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : int , _UpperCAmelCase : Union[Iterator[str], Iterator[Iterator[str]]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> Dict:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
self._tokenizer.train_from_iterator(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['unk']['id']
__lowercase = Tokenizer.from_str(json.dumps(_UpperCAmelCase ) )
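# Note added for clarity (not in the original file): patching the unk id by
# round-tripping through Tokenizer.to_str()/Tokenizer.from_str(), as above, is
# a way to edit serialized model fields that are not otherwise writable after
# training.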
| 688 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """spiece.model"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
}
}
class A__ ( lowerCAmelCase__ ):
def __init__( self : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Optional[int]="<s>" , _UpperCAmelCase : Union[str, Any]="</s>" , _UpperCAmelCase : Any="<unk>" , _UpperCAmelCase : Optional[Any]="<sep>" , _UpperCAmelCase : Optional[int]="<pad>" , _UpperCAmelCase : int="<cls>" , _UpperCAmelCase : str="<mask>" , _UpperCAmelCase : Any=["<eop>", "<eod>"] , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : List[str] , ) -> None:
"""simple docstring"""
__lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__lowercase = 3
__lowercase = do_lower_case
__lowercase = remove_space
__lowercase = keep_accents
__lowercase = vocab_file
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '
'See https://pypi.org/project/jieba/ for installation.' )
__lowercase = jieba
__lowercase = str.maketrans(' \n' , '\u2582\u2583' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return len(self.sp_model )
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self : int , _UpperCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__ ( self : Any , _UpperCAmelCase : List[Any] ) -> Dict:
"""simple docstring"""
if self.remove_space:
__lowercase = ' '.join(inputs.strip().split() )
else:
__lowercase = inputs
__lowercase = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
__lowercase = unicodedata.normalize('NFKD' , _UpperCAmelCase )
__lowercase = ''.join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] )
if self.do_lower_case:
__lowercase = outputs.lower()
return outputs
def a__ ( self : List[str] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.preprocess_text(_UpperCAmelCase )
__lowercase = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
__lowercase = []
for piece in pieces:
if len(_UpperCAmelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
__lowercase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowercase = cur_pieces[1:]
else:
__lowercase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_UpperCAmelCase )
else:
new_pieces.append(_UpperCAmelCase )
return new_pieces
def a__ ( self : Tuple , _UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
return self.sp_model.PieceToId(_UpperCAmelCase )
def a__ ( self : Tuple , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(_UpperCAmelCase )
def a__ ( self : Any , _UpperCAmelCase : Any ) -> Any:
"""simple docstring"""
__lowercase = ''.join(_UpperCAmelCase ).replace(_UpperCAmelCase , ' ' ).strip()
return out_string
def a__ ( self : Any , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1]
return ([0] * len(_UpperCAmelCase )) + [1, 1]
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def a__ ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def a__ ( self : Optional[int] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = super()._decode(*_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = text.replace(' ' , '' ).replace('\u2582' , ' ' ).replace('\u2583' , '\n' )
return text
| 714 |
import string
from math import logaa
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> int:
__lowercase = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
__lowercase = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
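# Worked example added for clarity (not in the original file): for the term
# "to" and the document "To be, or not to be", punctuation is stripped, the
# text is split on spaces, and the case-insensitive count returned is 2.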
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> tuple[int, int]:
__lowercase = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
__lowercase = corpus_without_punctuation.split('\n' )
__lowercase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(SCREAMING_SNAKE_CASE ))
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str]=False ) -> float:
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
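# Formula recap added for clarity (not in the original file): without
# smoothing this returns round(log10(n / df), 3); with smoothing it returns
# round(1 + log10(n / (1 + df)), 3), which avoids dividing by a zero df.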
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> float:
return round(tf * idf , 3 )
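# End-to-end sketch added for clarity (not in the original file): with tf=3,
# a corpus of n=10 documents and df=2, idf = round(log10(10 / 2), 3) = 0.699
# and the final score is round(3 * 0.699, 3) = 2.097.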
| 688 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
SCREAMING_SNAKE_CASE__ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
SCREAMING_SNAKE_CASE__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> Dict:
with open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
__lowercase = Image.open(SCREAMING_SNAKE_CASE )
return im.convert('RGB' )
@dataclass
class A__ :
lowerCAmelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
"help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
} , )
lowerCAmelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCAmelCase__ : Optional[str] = field(default=lowerCAmelCase__ , metadata={"help": "A folder containing the training data."} )
lowerCAmelCase__ : Optional[str] = field(default=lowerCAmelCase__ , metadata={"help": "A folder containing the validation data."} )
lowerCAmelCase__ : Optional[float] = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
lowerCAmelCase__ : Optional[int] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCAmelCase__ : Optional[int] = field(
default=lowerCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class A__ :
lowerCAmelCase__ : str = field(
default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
lowerCAmelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase__ )} , )
lowerCAmelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
lowerCAmelCase__ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCAmelCase__ : str = field(default=lowerCAmelCase__ , metadata={"help": "Name or path of preprocessor config."} )
lowerCAmelCase__ : bool = field(
default=lowerCAmelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowerCAmelCase__ : bool = field(
default=lowerCAmelCase__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
__lowercase = torch.stack([example['pixel_values'] for example in examples] )
__lowercase = torch.tensor([example['labels'] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase , __lowercase , __lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__lowercase = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
__lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
else:
__lowercase = {}
if data_args.train_dir is not None:
__lowercase = os.path.join(data_args.train_dir , '**' )
if data_args.validation_dir is not None:
__lowercase = os.path.join(data_args.validation_dir , '**' )
__lowercase = load_dataset(
'imagefolder' , data_files=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , task='image-classification' , )
# If we don't have a validation split, split off a percentage of train as validation.
__lowercase = None if 'validation' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0:
__lowercase = dataset['train'].train_test_split(data_args.train_val_split )
__lowercase = split['train']
__lowercase = split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__lowercase = dataset['train'].features['labels'].names
__lowercase , __lowercase = {}, {}
for i, label in enumerate(SCREAMING_SNAKE_CASE ):
__lowercase = str(SCREAMING_SNAKE_CASE )
__lowercase = label
# Load the accuracy metric from the datasets package
__lowercase = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE : str ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
__lowercase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(SCREAMING_SNAKE_CASE ) , labelaid=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__lowercase = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
__lowercase = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
__lowercase = image_processor.size['shortest_edge']
else:
__lowercase = (image_processor.size['height'], image_processor.size['width'])
__lowercase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
__lowercase = Compose(
[
RandomResizedCrop(SCREAMING_SNAKE_CASE ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
__lowercase = Compose(
[
Resize(SCREAMING_SNAKE_CASE ),
CenterCrop(SCREAMING_SNAKE_CASE ),
ToTensor(),
normalize,
] )
def train_transforms(SCREAMING_SNAKE_CASE : str ):
__lowercase = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(SCREAMING_SNAKE_CASE : List[str] ):
__lowercase = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
__lowercase = (
dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
__lowercase = (
dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(SCREAMING_SNAKE_CASE )
    # Initialize our trainer
__lowercase = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
__lowercase = None
if training_args.resume_from_checkpoint is not None:
__lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__lowercase = last_checkpoint
__lowercase = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__lowercase = trainer.evaluate()
trainer.log_metrics('eval' , SCREAMING_SNAKE_CASE )
trainer.save_metrics('eval' , SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
__lowercase = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'image-classification',
'dataset': data_args.dataset_name,
'tags': ['image-classification', 'vision'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 715 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE__ = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "retribert"
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=3_05_22 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : List[Any]=8 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=30_72 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=5_12 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : List[Any]=1e-1_2 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Optional[int]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = share_encoders
__lowercase = projection_dim
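# Usage sketch added for clarity (not in the original file; the obfuscated
# class name A__ stands in for the config class): instantiating with defaults,
# e.g. cfg = A__(), yields vocab_size 30522, hidden_size 768 and
# projection_dim 128, matching the checkpoint referenced in the archive map.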
| 688 | 0 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class A__ ( lowerCAmelCase__ ):
def __lt__( self : Optional[int] , _UpperCAmelCase : Dict ) -> List[Any]:
"""simple docstring"""
return self[-1] < other[-1]
def __eq__( self : Optional[int] , _UpperCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
return self[-1] == other[-1]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list ) -> list:
__lowercase = []
# sort into stacks
for element in collection:
__lowercase = Stack([element] )
__lowercase = bisect_left(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if i != len(SCREAMING_SNAKE_CASE ):
stacks[i].append(SCREAMING_SNAKE_CASE )
else:
stacks.append(SCREAMING_SNAKE_CASE )
    # use a heap-based merge to merge the stacks efficiently
__lowercase = merge(*(reversed(SCREAMING_SNAKE_CASE ) for stack in stacks) )
return collection
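# Worked example added for clarity (not in the original file): sorting
# [5, 1, 4, 2, 3] builds the piles [5, 1], [4, 2] and [3] (bisect_left on the
# pile tops keeps each pile decreasing), and the heap merge of the reversed
# piles yields [1, 2, 3, 4, 5].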
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = input("""Enter numbers separated by a comma:\n""").strip()
SCREAMING_SNAKE_CASE__ = [int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 688 | 0 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class A__ ( unittest.TestCase ):
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = inspect.getfile(accelerate.test_utils )
__lowercase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
__lowercase = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase = f"""
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
""".split()
__lowercase = [sys.executable] + distributed_args
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
| 717 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = ["image_processor", "tokenizer"]
lowerCAmelCase__ : Union[str, Any] = "LayoutLMv2ImageProcessor"
lowerCAmelCase__ : Union[str, Any] = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Optional[Any] , _UpperCAmelCase : Any=None , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__lowercase = kwargs.pop('feature_extractor' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , _UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : Dict , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
__lowercase = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowercase = features['words']
__lowercase = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
__lowercase = features.pop('pixel_values' )
if return_overflowing_tokens is True:
__lowercase = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'] )
__lowercase = images
return encoded_inputs
def a__ ( self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
__lowercase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f""" {len(_UpperCAmelCase )} and {len(_UpperCAmelCase )}""" )
return images_with_overflow
def a__ ( self : Dict , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def a__ ( self : str ) -> Dict:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
| 688 | 0 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : bytes , SCREAMING_SNAKE_CASE : int ) -> np.array:
__lowercase = F"""{sampling_rate}"""
__lowercase = '1'
__lowercase = 'f32le'
__lowercase = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
__lowercase = ffmpeg_process.communicate(SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
__lowercase = output_stream[0]
__lowercase = np.frombuffer(SCREAMING_SNAKE_CASE , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
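# Usage sketch added for clarity (not in the original file; the file name is
# hypothetical): the decoder above turns any container ffmpeg understands into
# a mono float32 waveform, e.g. by passing open("sample.flac", "rb").read()
# together with a target sampling rate such as 16_000.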
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : str = "f32le" , ) -> List[Any]:
__lowercase = F"""{sampling_rate}"""
__lowercase = '1'
if format_for_conversion == "s16le":
__lowercase = 2
elif format_for_conversion == "f32le":
__lowercase = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
__lowercase = platform.system()
if system == "Linux":
__lowercase = 'alsa'
__lowercase = 'default'
elif system == "Darwin":
__lowercase = 'avfoundation'
__lowercase = ':0'
elif system == "Windows":
__lowercase = 'dshow'
__lowercase = 'default'
__lowercase = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
__lowercase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__lowercase = _ffmpeg_stream(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[Tuple[float, float], float]] = None , SCREAMING_SNAKE_CASE : str = "f32le" , ) -> Any:
if stream_chunk_s is not None:
__lowercase = stream_chunk_s
else:
__lowercase = chunk_length_s
__lowercase = ffmpeg_microphone(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , format_for_conversion=SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
__lowercase = np.intaa
__lowercase = 2
elif format_for_conversion == "f32le":
__lowercase = np.floataa
__lowercase = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
__lowercase = chunk_length_s / 6
__lowercase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(SCREAMING_SNAKE_CASE , (int, float) ):
__lowercase = [stride_length_s, stride_length_s]
__lowercase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
__lowercase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
__lowercase = datetime.datetime.now()
__lowercase = datetime.timedelta(seconds=SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
__lowercase = np.frombuffer(item['raw'] , dtype=SCREAMING_SNAKE_CASE )
__lowercase = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
__lowercase = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple[int, int] , SCREAMING_SNAKE_CASE : bool = False ) -> int:
__lowercase = b''
__lowercase , __lowercase = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
__lowercase = 0
for raw in iterator:
acc += raw
if stream and len(SCREAMING_SNAKE_CASE ) < chunk_len:
__lowercase = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(SCREAMING_SNAKE_CASE ) >= chunk_len:
# We are flushing the accumulator
__lowercase = (_stride_left, stride_right)
__lowercase = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
__lowercase = False
yield item
__lowercase = stride_left
__lowercase = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(SCREAMING_SNAKE_CASE ) > stride_left:
__lowercase = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
__lowercase = False
yield item
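# Worked example added for clarity (not in the original file): with
# chunk_len=6, stride=(2, 2) and a 10-byte input, the generator yields bytes
# [0:6] with stride (0, 2), then [2:8] and [4:10] with stride (2, 2), and
# finally flushes the tail [6:10] with stride (2, 0).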
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int ) -> Any:
    __lowercase = 2**24 # 16 MiB
try:
with subprocess.Popen(SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
__lowercase = ffmpeg_process.stdout.read(SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
| 718 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class A__ :
lowerCAmelCase__ : Optional[int] = "dummy_data"
lowerCAmelCase__ : str = "datasets"
lowerCAmelCase__ : Dict = False
def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[Version, str] , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[List[Callable]] = None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 0
__lowercase = dataset_name
__lowercase = cache_dir
__lowercase = use_local_dummy_data
__lowercase = config
# download_callbacks take a single url as input
__lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowercase = str(_UpperCAmelCase )
# to be downloaded
__lowercase = None
__lowercase = None
@property
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
if self._dummy_file is None:
__lowercase = self.download_dummy_data()
return self._dummy_file
@property
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowercase = cached_path(
_UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=_UpperCAmelCase , force_extract=_UpperCAmelCase )
return os.path.join(_UpperCAmelCase , self.dummy_file_name )
@property
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if self._bucket_url is None:
__lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[str] , *_UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return self.create_dummy_data_dict(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return self.create_dummy_data_list(_UpperCAmelCase , _UpperCAmelCase )
else:
return self.create_dummy_data_single(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : Tuple , *_UpperCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , *_UpperCAmelCase : str , **_UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
return path
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return {}
def a__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for single_url in single_urls:
download_callback(_UpperCAmelCase )
else:
__lowercase = single_urls
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) ) for x in single_urls]
else:
__lowercase = single_urls
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) )
__lowercase = value
# make sure that values are unique
if all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , _UpperCAmelCase ) ) for url in data_url )
__lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowercase = [data_url[0]] * len(_UpperCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(_UpperCAmelCase )
return dummy_data_list
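    # Shard example: for ["a-00001-of-00300", "a-00002-of-00300"] the regex above
    # matches every url, so data_url collapses to len(data_url) copies of the first
    # shard and the same quoted dummy file name is appended for each entry.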
def a__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(_UpperCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def a__ ( self : int ) -> str:
"""simple docstring"""
pass
def a__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
def _iter_archive_members(_UpperCAmelCase : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
__lowercase = Path(self.dummy_file ).parent
__lowercase = path.relative_to(_UpperCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_UpperCAmelCase )
__lowercase = Path(_UpperCAmelCase )
__lowercase = _iter_archive_members(_UpperCAmelCase ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(_UpperCAmelCase ).as_posix(), file_path.open('rb' )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [paths]
for path in paths:
if os.path.isfile(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(_UpperCAmelCase ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase )
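# Traversal sketch for the generator above (assuming `paths` is a str or a list of
# str): files are yielded depth-first with directories and files visited in sorted
# order, and any name starting with "." or "__" is skipped at every level.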
| 688 | 0 |
def hubble_parameter( hubble_constant : float , radiation_density : float , matter_density : float , dark_energy : float , redshift : float , ) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be non-negative' )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('Relative densities cannot be greater than one' )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
return hubble
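# Minimal usage sketch: with the densities used in the demo below, curvature is
# 1 - (0.3 + 1e-4 + 0.7) = -1e-4, so at redshift 0 the bracketed sum collapses to
# 1 and the function returns the Hubble constant itself (up to float rounding):
#
#     >>> hubble_parameter(68.3, 1e-4, 0.3, 0.7, 0)
#     68.3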
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 719 |
import math
import sys
import cv2
import numpy as np
def vec_gaussian( img : np.ndarray , variance : float ) -> np.ndarray:
    # Apply the Gaussian function to each element of the matrix.
    sigma = math.sqrt(variance )
    cons = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def get_slice( img : np.ndarray , x : int , y : int , kernel_size : int ) -> np.ndarray:
    half = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel( kernel_size : int , spatial_variance : float ) -> np.ndarray:
    # Create a Gaussian kernel of the given dimension.
    arr = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , kernel_size ):
        for j in range(0 , kernel_size ):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(arr , spatial_variance )
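# Sanity-check sketch: get_gauss_kernel(3, 1.0) builds a 3x3 matrix of distances
# from the centre cell (0 at the centre, 1 on the edges, sqrt(2) on the corners)
# and maps it through vec_gaussian, so the kernel peaks at the centre and decays
# symmetrically outward.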
def bilateral_filter( img : np.ndarray , spatial_variance : float , intensity_variance : float , kernel_size : int , ) -> np.ndarray:
    imga = np.zeros(img.shape )
    gauss_ker = get_gauss_kernel(kernel_size , spatial_variance )
    size_x , size_y = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            img_s = get_slice(img , i , j , kernel_size )
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i , intensity_variance )
            weights = np.multiply(gauss_ker , img_ig )
            vals = np.multiply(img_s , weights )
            val = np.sum(vals ) / np.sum(weights )
            imga[i, j] = val
return imga
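# Direct-call sketch (assumes a grayscale float image scaled to [0, 1], as
# prepared in the __main__ block below): bilateral_filter(out, 1.0, 1.0, 5)
# returns an array of the same shape, smoothed everywhere except across strong
# intensity edges; border pixels within kernel_size // 2 are left at zero.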
def parse_args( args : list ) -> tuple:
    filename = args[1] if args[1:] else '../image_data/lena.jpg'
    spatial_variance = float(args[2] ) if args[2:] else 1.0
    intensity_variance = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4] )
        kernel_size = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        kernel_size = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)
    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 688 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : str = "mgp-str"
def __init__( self : Tuple , _UpperCAmelCase : int=[32, 1_28] , _UpperCAmelCase : str=4 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : str=27 , _UpperCAmelCase : List[str]=38 , _UpperCAmelCase : Optional[Any]=5_02_57 , _UpperCAmelCase : Union[str, Any]=3_05_22 , _UpperCAmelCase : List[str]=7_68 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : List[Any]=12 , _UpperCAmelCase : Dict=4.0 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : str=False , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : Optional[Any]=0.0 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Union[str, Any]=0.02 , **_UpperCAmelCase : int , ) -> List[Any]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = max_token_length
__lowercase = num_character_labels
__lowercase = num_bpe_labels
__lowercase = num_wordpiece_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = mlp_ratio
__lowercase = distilled
__lowercase = layer_norm_eps
__lowercase = drop_rate
__lowercase = qkv_bias
__lowercase = attn_drop_rate
__lowercase = drop_path_rate
__lowercase = output_aa_attentions
__lowercase = initializer_range | 720 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Any=18 , _UpperCAmelCase : Dict=30 , _UpperCAmelCase : Tuple=4_00 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Any=True , ) -> Dict:
"""simple docstring"""
__lowercase = size if size is not None else {'height': 18, 'width': 18}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size
__lowercase = apply_ocr
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
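# The dict returned above is unpacked into the image processor in the tests
# below, so a standalone construction sketch with this tester's defaults is:
#
#     LayoutLMvaImageProcessor(do_resize=True, size={"height": 18, "width": 18}, apply_ocr=True)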
@require_torch
@require_pytesseract
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessingTester(self )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'apply_ocr' ) )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
pass
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _UpperCAmelCase )
self.assertIsInstance(encoding.boxes , _UpperCAmelCase )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__lowercase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__lowercase = Image.open(ds[0]['file'] ).convert('RGB' )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowercase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__lowercase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCAmelCase )
self.assertListEqual(encoding.boxes , _UpperCAmelCase )
# with apply_OCR = False
__lowercase = LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 688 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 721 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "umt5"
lowerCAmelCase__ : Tuple = ["past_key_values"]
def __init__( self : str , _UpperCAmelCase : int=25_01_12 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : List[str]=64 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : str=8 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : str=32 , _UpperCAmelCase : Optional[int]=1_28 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : str=1e-6 , _UpperCAmelCase : Dict=1.0 , _UpperCAmelCase : str="gated-gelu" , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Tuple="T5Tokenizer" , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[str]=0 , _UpperCAmelCase : int=1 , _UpperCAmelCase : List[str]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
is_encoder_decoder=_UpperCAmelCase , tokenizer_class=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = vocab_size
__lowercase = d_model
__lowercase = d_kv
__lowercase = d_ff
__lowercase = num_layers
__lowercase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase = num_heads
__lowercase = relative_attention_num_buckets
__lowercase = relative_attention_max_distance
__lowercase = dropout_rate
__lowercase = layer_norm_epsilon
__lowercase = initializer_factor
__lowercase = feed_forward_proj
__lowercase = use_cache
__lowercase = self.feed_forward_proj.split('-' )
__lowercase = act_info[-1]
__lowercase = act_info[0] == 'gated'
if len(_UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(_UpperCAmelCase ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__lowercase = 'gelu_new'
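    # Parsing sketch for the block above: feed_forward_proj="gated-gelu" splits on
    # "-" into ["gated", "gelu"], so the activation becomes "gelu" (then remapped to
    # "gelu_new") with gating enabled; a plain "relu" keeps "relu" and no gating.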
@property
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self.d_model
@property
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.num_heads
@property
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.num_layers
class A__ ( lowerCAmelCase__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def a__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__lowercase = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__lowercase = 'past_encoder_sequence + sequence'
__lowercase = {0: 'batch'}
__lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return 13
@property
def a__ ( self : Dict ) -> float:
"""simple docstring"""
return 5e-4
| 688 | 0 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : Optional[Any] = RoFormerTokenizer
lowerCAmelCase__ : Tuple = RoFormerTokenizerFast
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : int = True
def a__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
super().setUp()
def a__ ( self : Any , **_UpperCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **_UpperCAmelCase )
def a__ ( self : Dict , **_UpperCAmelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **_UpperCAmelCase )
def a__ ( self : str ) -> Any:
"""simple docstring"""
__lowercase = '永和服装饰品有限公司,今天天气非常好'
__lowercase = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
return input_text, output_text
def a__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = self.get_tokenizer()
__lowercase , __lowercase = self.get_chinese_input_output_texts()
__lowercase = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , output_text.split() )
__lowercase = tokens + [tokenizer.unk_token]
__lowercase = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
__lowercase = self.get_rust_tokenizer()
__lowercase , __lowercase = self.get_chinese_input_output_texts()
__lowercase = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , output_text.split() )
__lowercase = tokens + [tokenizer.unk_token]
__lowercase = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def a__ ( self : str ) -> Any:
"""simple docstring"""
pass
def a__ ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
pass
| 700 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "layoutlmv3"
def __init__( self : Optional[Any] , _UpperCAmelCase : int=5_02_65 , _UpperCAmelCase : Union[str, Any]=7_68 , _UpperCAmelCase : str=12 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : List[str]=30_72 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Tuple=1_28 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : int=64 , _UpperCAmelCase : List[str]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=2_24 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = max_ad_position_embeddings
__lowercase = coordinate_size
__lowercase = shape_size
__lowercase = has_relative_attention_bias
__lowercase = rel_pos_bins
__lowercase = max_rel_pos
__lowercase = has_spatial_attention_bias
__lowercase = rel_ad_pos_bins
__lowercase = max_rel_ad_pos
__lowercase = text_embed
__lowercase = visual_embed
__lowercase = input_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = classifier_dropout
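# Construction sketch for the config above: with the defaults shown
# (input_size 224, patch_size 16) the visual branch sees 224 // 16 = 14 patches
# per side, i.e. 196 visual tokens before any text tokens are prepended.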
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = version.parse("1.12" )
@property
def a__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def a__ ( self : Any ) -> float:
"""simple docstring"""
return 1e-5
@property
def a__ ( self : Dict ) -> int:
"""simple docstring"""
return 12
def a__ ( self : Tuple , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
| 688 | 0 |
def is_pangram( input_str : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(' ' , '' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
    return len(frequency ) == 26
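# Worked example: the default sentence contributes every letter a-z to the set,
# so len(frequency) == 26 and is_pangram() returns True; is_pangram("abc")
# yields a 3-element set and returns False.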
def is_pangram_faster( input_str : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - ord('a' )] = True
        elif char.isupper():
            flag[ord(char ) - ord('A' )] = True
    return all(flag )
def is_pangram_fastest( input_str : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def benchmark() -> None:
    from timeit import timeit
    setup = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
    print(timeit('is_pangram()' , setup=setup ) )
    print(timeit('is_pangram_faster()' , setup=setup ) )
    print(timeit('is_pangram_fastest()' , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 701 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray( rgb : np.ndarray ) -> np.ndarray:
    r , g , b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b
def gray_to_binary( gray : np.ndarray ) -> np.ndarray:
return (gray > 127) & (gray <= 255)
def dilation( image : np.ndarray , kernel : np.ndarray ) -> np.ndarray:
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image (this slice centres it for the 3x3 kernel used below)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
return output
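# Behaviour sketch for the cross-shaped kernel used below: a lone foreground
# pixel grows into a plus sign, since every neighbouring position whose kernel
# window overlaps the pixel gets summation > 0 and is therefore set to 1 — the
# standard grow-the-foreground morphological dilation.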
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 688 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ = {
"""configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 702 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[str] = ["pixel_values"]
def __init__( self : Tuple , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 2_55 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : bool = True , **_UpperCAmelCase : str , ) -> None:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__lowercase = size if size is not None else {'height': 3_84, 'width': 3_84}
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = do_resize
__lowercase = size
__lowercase = resample
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase = do_convert_rgb
def a__ ( self : int , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : int , ) -> np.ndarray:
"""simple docstring"""
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
__lowercase = (size['height'], size['width'])
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Any , ) -> str:
"""simple docstring"""
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : ImageInput , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Dict[str, int]] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[float] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : int , ) -> PIL.Image.Image:
"""simple docstring"""
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = resample if resample is not None else self.resample
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__lowercase = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
__lowercase = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
__lowercase = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
__lowercase = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
__lowercase = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
__lowercase = BatchFeature(data={'pixel_values': images} , tensor_type=_UpperCAmelCase )
return encoded_outputs
| 688 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """▁"""
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """sentencepiece.bpe.model"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
}
}
SCREAMING_SNAKE_CASE__ = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE__ = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : str = VOCAB_FILES_NAMES
lowerCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Tuple = ["input_ids", "attention_mask"]
lowerCAmelCase__ : List[int] = []
lowerCAmelCase__ : List[int] = []
def __init__( self : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any]="<s>" , _UpperCAmelCase : List[Any]="</s>" , _UpperCAmelCase : Optional[int]="</s>" , _UpperCAmelCase : Any="<s>" , _UpperCAmelCase : Any="<unk>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : Dict="<mask>" , _UpperCAmelCase : str=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[Dict[str, Any]] = None , _UpperCAmelCase : List[str]=None , **_UpperCAmelCase : Optional[Any] , ) -> Tuple:
"""simple docstring"""
__lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
__lowercase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__lowercase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__lowercase = 1
__lowercase = len(self.sp_model )
__lowercase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCAmelCase )
}
__lowercase = {v: k for k, v in self.lang_code_to_id.items()}
__lowercase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__lowercase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__lowercase = src_lang if src_lang is not None else 'en_XX'
__lowercase = self.lang_code_to_id[self._src_lang]
__lowercase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase = self.__dict__.copy()
__lowercase = None
__lowercase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Any , _UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def a__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a__ ( self : Optional[int] , _UpperCAmelCase : str ) -> None:
"""simple docstring"""
__lowercase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a__ ( self : Any , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
__lowercase = [1] * len(self.prefix_tokens )
__lowercase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def a__ ( self : str , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
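    # Sequence layout sketch (per set_src_lang_special_tokens below): prefix_tokens
    # is empty and suffix_tokens is [eos, src_lang_code], so a single sequence is
    # serialized as  tokens + [</s>, en_XX]  — the MBART "X [eos, src_lang_code]" format.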
def a__ ( self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__ ( self : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] , _UpperCAmelCase : Optional[str] , **_UpperCAmelCase : Optional[Any] ) -> Any:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__lowercase = src_lang
__lowercase = self(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = self.convert_tokens_to_ids(_UpperCAmelCase )
__lowercase = tgt_lang_id
return inputs
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__ ( self : Any , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def a__ ( self : Tuple , _UpperCAmelCase : Tuple ) -> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowercase = self.sp_model.PieceToId(_UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
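    # Offset example, matching the alignment table in __init__: sentencepiece id 3
    # ("," in the comment above) maps to fairseq id 3 + fairseq_offset (=1) = 4,
    # while an spm id of 0 (spm's <unk>) falls through to unk_token_id.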
def a__ ( self : int , _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__ ( self : str , _UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = ''.join(_UpperCAmelCase ).replace(_UpperCAmelCase , ' ' ).strip()
return out_string
def a__ ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def a__ ( self : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : str = "en_XX" , _UpperCAmelCase : Optional[List[str]] = None , _UpperCAmelCase : str = "ro_RO" , **_UpperCAmelCase : Tuple , ) -> BatchEncoding:
"""simple docstring"""
__lowercase = src_lang
__lowercase = tgt_lang
return super().prepare_seqaseq_batch(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def a__ ( self : int ) -> Any:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__ ( self : Tuple , _UpperCAmelCase : Optional[int] ) -> None:
"""simple docstring"""
__lowercase = self.lang_code_to_id[src_lang]
__lowercase = []
__lowercase = [self.eos_token_id, self.cur_lang_code]
def a__ ( self : Optional[int] , _UpperCAmelCase : str ) -> None:
"""simple docstring"""
__lowercase = self.lang_code_to_id[lang]
__lowercase = []
__lowercase = [self.eos_token_id, self.cur_lang_code]
| 703 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
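# Usage sketch for the lazy structure above: with _LazyModule, a top-level
# import of the package stays cheap, and each name is resolved from its
# submodule on first attribute access. This mirrors how `transformers` itself
# behaves and assumes the package (plus torch, for model classes) is installed:
#
#     from transformers import BertConfig   # resolves configuration_bert lazily
#     from transformers import BertModel    # resolves modeling_bert (needs torch)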
import pprint
import requests
SCREAMING_SNAKE_CASE__ = """https://zenquotes.io/api"""
def __SCREAMING_SNAKE_CASE ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def __SCREAMING_SNAKE_CASE ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = random_quotes()
pprint.pprint(response)
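# A hedged, hardened variant of the two helpers above: `fetch_quotes` is an
# illustrative name, not part of the zenquotes module. It adds a request
# timeout and raises on HTTP errors instead of parsing an error body as JSON.
def fetch_quotes(path: str, timeout: float = 10.0) -> list:
    response = requests.get(API_ENDPOINT_URL + path, timeout=timeout)
    response.raise_for_status()  # surface 4xx/5xx early
    return response.json()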
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict ) -> Any:
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
__lowercase = TapasConfig.from_json_file(SCREAMING_SNAKE_CASE )
# set absolute/relative position embeddings parameter
__lowercase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WTQ":
# run_task_main.py hparams
__lowercase = 4
__lowercase = True
# hparam_utils.py hparams
__lowercase = 0.664_694
__lowercase = 0.207_951
__lowercase = 0.121_194
__lowercase = True
__lowercase = True
__lowercase = False
__lowercase = 0.0_352_513
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
__lowercase = 4
__lowercase = False
# hparam_utils.py hparams
__lowercase = 36.4_519
__lowercase = 0.903_421
__lowercase = 222.088
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = 0.763_141
__lowercase = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
elif task == "TABFACT":
__lowercase = TapasForSequenceClassification(config=SCREAMING_SNAKE_CASE )
elif task == "MLM":
__lowercase = TapasForMaskedLM(config=SCREAMING_SNAKE_CASE )
elif task == "INTERMEDIATE_PRETRAINING":
__lowercase = TapasModel(config=SCREAMING_SNAKE_CASE )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
__lowercase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
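# Example invocation (all paths are placeholders; the script file name is an
# assumption based on the transformers naming convention):
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/output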
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class A__ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
def __init__( self : Optional[Any] , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Union[str, Any]="[UNK]" , _UpperCAmelCase : str="[SEP]" , _UpperCAmelCase : Optional[int]="[PAD]" , _UpperCAmelCase : Any="[CLS]" , _UpperCAmelCase : List[Any]="[MASK]" , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : Optional[int] , ) -> Tuple:
"""simple docstring"""
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCAmelCase ) != tokenize_chinese_chars
):
__lowercase = getattr(_UpperCAmelCase , normalizer_state.pop('type' ) )
__lowercase = do_lower_case
__lowercase = strip_accents
__lowercase = tokenize_chinese_chars
__lowercase = normalizer_class(**_UpperCAmelCase )
__lowercase = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[int]:
        """Build model inputs as [CLS] A [SEP] (or [CLS] A [SEP] B [SEP] for a pair)."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        """Segment 0 for [CLS] A [SEP]; segment 1 for B [SEP] when a pair is given."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """Save the tokenizer model files to `save_directory`."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
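# Standalone sketch of the segment-id rule the class above implements:
# everything in sentence A (plus [CLS] and the first [SEP]) is segment 0,
# sentence B (plus its [SEP]) is segment 1. `_token_type_ids` is an
# illustrative helper, not part of the tokenizer.
def _token_type_ids(len_a: int, len_b: int = 0) -> list:
    ids = [0] * (1 + len_a + 1)  # [CLS] + tokens_a + [SEP]
    if len_b:
        ids += [1] * (len_b + 1)  # tokens_b + [SEP]
    return ids


assert _token_type_ids(3) == [0, 0, 0, 0, 0]
assert _token_type_ids(2, 2) == [0, 0, 0, 0, 1, 1, 1]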
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one input is 1, otherwise 0."""
    return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate() -> None:
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
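# Composition sketch: other gates follow directly from or_gate; `nor_gate`
# here is an illustrative helper, not part of the original module.
def nor_gate(input_1: int, input_2: int) -> int:
    return int(not or_gate(input_1, input_2))


assert [nor_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [1, 0, 0, 0]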
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Return the reduced fraction (numerator, denominator) equal to `decimal`."""
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError('Please enter a valid number' )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split('.' )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm: after the loop, divisor holds gcd(numerator, denominator)
        dividend , divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend , divisor = divisor, remainder
        numerator , denominator = numerator // divisor, denominator // divisor
        return int(numerator ), int(denominator )
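# Cross-check sketch against the standard library: fractions.Fraction performs
# the same gcd reduction, so it should agree with the function above.
from fractions import Fraction

assert decimal_to_fraction('6.25') == (25, 4)
assert Fraction('6.25') == Fraction(25, 4)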
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """Hello, World!"""
SCREAMING_SNAKE_CASE__ = """en_XX"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool ) -> Optional[int]:
__lowercase = Path('data_bin' )
__lowercase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE ).parent ) , checkpoint_file=Path(SCREAMING_SNAKE_CASE ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(SCREAMING_SNAKE_CASE ) , bpe='sentencepiece' , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE )
__lowercase = xmod.model.encoder.sentence_encoder
__lowercase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our X-MOD config:' , SCREAMING_SNAKE_CASE )
__lowercase = XmodForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowercase = xmod_sent_encoder.embed_tokens.weight
__lowercase = xmod_sent_encoder.embed_positions.weight
__lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowercase = xmod_sent_encoder.layernorm_embedding.weight
__lowercase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowercase = model.roberta.encoder.layer[i]
__lowercase = xmod_sent_encoder.layers[i]
# self attention
__lowercase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
__lowercase = xmod_layer.self_attn.q_proj.weight
__lowercase = xmod_layer.self_attn.q_proj.bias
__lowercase = xmod_layer.self_attn.k_proj.weight
__lowercase = xmod_layer.self_attn.k_proj.bias
__lowercase = xmod_layer.self_attn.v_proj.weight
__lowercase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowercase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
__lowercase = xmod_layer.self_attn.out_proj.weight
__lowercase = xmod_layer.self_attn.out_proj.bias
__lowercase = xmod_layer.self_attn_layer_norm.weight
__lowercase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowercase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
# output
__lowercase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
__lowercase = xmod_layer.fca.weight
__lowercase = xmod_layer.fca.bias
__lowercase = xmod_layer.final_layer_norm.weight
__lowercase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowercase = xmod_layer.adapter_layer_norm.weight
__lowercase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowercase = bert_output.adapter_modules[lang_code]
__lowercase = xmod_layer.adapter_modules[lang_code]
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
__lowercase = from_adapter.fca.weight
__lowercase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowercase = xmod_sent_encoder.layer_norm.weight
__lowercase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'].dense.weight
__lowercase = xmod.model.classification_heads['mnli'].dense.bias
__lowercase = xmod.model.classification_heads['mnli'].out_proj.weight
__lowercase = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowercase = xmod.model.encoder.lm_head.dense.weight
__lowercase = xmod.model.encoder.lm_head.dense.bias
__lowercase = xmod.model.encoder.lm_head.layer_norm.weight
__lowercase = xmod.model.encoder.lm_head.layer_norm.bias
__lowercase = xmod.model.encoder.lm_head.weight
__lowercase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowercase = xmod.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE )
__lowercase = model(SCREAMING_SNAKE_CASE )[0]
if classification_head:
__lowercase = xmod.model.classification_heads['mnli'](xmod.extract_features(SCREAMING_SNAKE_CASE ) )
else:
__lowercase = xmod.model(SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__lowercase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
__lowercase = torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
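# Example invocation (placeholder paths; the script file name is an assumption
# based on the transformers naming convention):
#
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output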
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class A__ ( unittest.TestCase ):
    def setUp( self ) -> None:
        vocab = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.add_kwargs_tokens_map = {
            'unk_token': '<unk>',
            'bos_token': '<s>',
            'eos_token': '</s>',
        }
        feature_extractor_map = {
            'feature_size': 1,
            'padding_value': 0.0,
            'sampling_rate': 1_60_00,
            'return_attention_mask': False,
            'do_normalize': True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.feature_extraction_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(feature_extractor_map ) + '\n' )
        # load decoder from hub
        self.decoder_name = 'hf-internal-testing/ngram-beam-search-decoder'
    def get_tokenizer( self , **kwargs_init ):
        """Build the CTC tokenizer, merging test-wide special-token kwargs with overrides."""
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **kwargs )
    def get_decoder( self , **kwargs ):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **kwargs )
    def tearDown( self ) -> None:
        shutil.rmtree(self.tmpdirname )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.get_tokenizer()
__lowercase = self.get_feature_extractor()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _UpperCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _UpperCAmelCase )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(_UpperCAmelCase , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=_UpperCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a__ ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase )
__lowercase = floats_list((3, 10_00) )
__lowercase = feature_extractor(_UpperCAmelCase , return_tensors='np' )
__lowercase = processor(_UpperCAmelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase )
__lowercase = 'This is a test string'
__lowercase = processor(text=_UpperCAmelCase )
__lowercase = tokenizer(_UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits( self , shape=(2, 10, 16) , seed=77 ):
        np.random.seed(seed )
        return np.random.rand(*shape )
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase )
__lowercase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowercase = processor.decode(_UpperCAmelCase )
__lowercase = decoder.decode_beams(_UpperCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase )
__lowercase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase = processor.batch_decode(_UpperCAmelCase )
else:
with get_context(_UpperCAmelCase ).Pool() as pool:
__lowercase = processor.batch_decode(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = list(_UpperCAmelCase )
with get_context('fork' ).Pool() as p:
__lowercase = decoder.decode_beams_batch(_UpperCAmelCase , _UpperCAmelCase )
__lowercase , __lowercase , __lowercase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_UpperCAmelCase , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(_UpperCAmelCase , decoded_processor.logit_score )
self.assertListEqual(_UpperCAmelCase , decoded_processor.lm_score )
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 15
__lowercase = -20.0
__lowercase = -4.0
__lowercase = processor.batch_decode(
_UpperCAmelCase , beam_width=_UpperCAmelCase , beam_prune_logp=_UpperCAmelCase , token_min_logp=_UpperCAmelCase , )
__lowercase = decoded_processor_out.text
__lowercase = list(_UpperCAmelCase )
with get_context('fork' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_UpperCAmelCase , _UpperCAmelCase , beam_width=_UpperCAmelCase , beam_prune_logp=_UpperCAmelCase , token_min_logp=_UpperCAmelCase , )
__lowercase = [d[0][0] for d in decoded_decoder_out]
__lowercase = [d[0][2] for d in decoded_decoder_out]
__lowercase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , _UpperCAmelCase )
self.assertTrue(np.array_equal(_UpperCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _UpperCAmelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(_UpperCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , _UpperCAmelCase , atol=1e-3 ) )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 2.0
__lowercase = 5.0
__lowercase = -20.0
__lowercase = True
__lowercase = processor.batch_decode(
_UpperCAmelCase , alpha=_UpperCAmelCase , beta=_UpperCAmelCase , unk_score_offset=_UpperCAmelCase , lm_score_boundary=_UpperCAmelCase , )
__lowercase = decoded_processor_out.text
__lowercase = list(_UpperCAmelCase )
decoder.reset_params(
alpha=_UpperCAmelCase , beta=_UpperCAmelCase , unk_score_offset=_UpperCAmelCase , lm_score_boundary=_UpperCAmelCase , )
with get_context('fork' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_UpperCAmelCase , _UpperCAmelCase , )
__lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , _UpperCAmelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _UpperCAmelCase )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__lowercase = os.listdir(_UpperCAmelCase )
__lowercase = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = snapshot_download('hf-internal-testing/processor_with_lm' )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(_UpperCAmelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__lowercase = os.listdir(_UpperCAmelCase )
__lowercase = os.listdir(_UpperCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowercase = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowercase = floats_list((3, 10_00) )
__lowercase = processor_wavaveca(_UpperCAmelCase , return_tensors='np' )
__lowercase = processor_auto(_UpperCAmelCase , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__lowercase = self._get_dummy_logits()
__lowercase = processor_wavaveca.batch_decode(_UpperCAmelCase )
__lowercase = processor_auto.batch_decode(_UpperCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
    @staticmethod
    def get_from_offsets( offsets , key ):
        """Collect the `key` field from a list of offset dicts."""
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowercase = self._get_dummy_logits()[0]
__lowercase = processor.decode(_UpperCAmelCase , output_word_offsets=_UpperCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowercase = self._get_dummy_logits()
__lowercase = processor.batch_decode(_UpperCAmelCase , output_word_offsets=_UpperCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(_UpperCAmelCase , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
import torch
__lowercase = load_dataset('common_voice' , 'en' , split='train' , streaming=_UpperCAmelCase )
__lowercase = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_60_00 ) )
__lowercase = iter(_UpperCAmelCase )
__lowercase = next(_UpperCAmelCase )
__lowercase = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__lowercase = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__lowercase = model(_UpperCAmelCase ).logits.cpu().numpy()
__lowercase = processor.decode(logits[0] , output_word_offsets=_UpperCAmelCase )
__lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__lowercase = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(_UpperCAmelCase , 'word' ) ) , _UpperCAmelCase )
self.assertEqual(' '.join(self.get_from_offsets(_UpperCAmelCase , 'word' ) ) , output.text )
# output times
__lowercase = torch.tensor(self.get_from_offsets(_UpperCAmelCase , 'start_time' ) )
__lowercase = torch.tensor(self.get_from_offsets(_UpperCAmelCase , 'end_time' ) )
# fmt: off
__lowercase = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
__lowercase = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=0.01 ) )
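# Standalone sketch of the offset-to-seconds conversion checked above:
# seconds = frame_offset * inputs_to_logits_ratio / sampling_rate. The 320x
# ratio is an assumption matching Wav2Vec2-base's feature-extractor stride.
_EXAMPLE_INPUTS_TO_LOGITS_RATIO = 320
_EXAMPLE_SAMPLING_RATE = 16_000


def _offset_to_seconds(frame_offset: int) -> float:
    return frame_offset * _EXAMPLE_INPUTS_TO_LOGITS_RATIO / _EXAMPLE_SAMPLING_RATE


assert _offset_to_seconds(50) == 1.0  # 50 logit frames -> 1 second of audio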
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the product u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main() -> None:
    n = int(input('enter the numbers of values: ' ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print('enter the values of parameters in a list: ' )
    x = list(map(int , input().split() ) )
    print('enter the values of corresponding parameters: ' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('enter the value to interpolate: ' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class A__ ( PretrainedConfig ):
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , image_size=64 , patch_size=1 , num_channels=3 , embed_dim=1_80 , depths=[6, 6, 6, 6, 6, 6] , num_heads=[6, 6, 6, 6, 6, 6] , window_size=8 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , upscale=2 , img_range=1.0 , resi_connection="1conv" , upsampler="pixelshuffle" , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
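# Usage sketch via the public transformers export of this config (assumes
# transformers >= 4.25.0, where Swin2SR was introduced):
#
#     from transformers import Swin2SRConfig
#     config = Swin2SRConfig()
#     print(config.embed_dim, config.upscale)   # 180 2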
def catalan(number: int) -> int:
    """Return the `number`-th Catalan number (1-indexed)."""
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        msg = F"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
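# Quick check of the recurrence used above, C(n) = C(n-1) * (4n - 2) // (n + 1),
# against the first few Catalan numbers:
assert [catalan(i) for i in range(1, 6)] == [1, 1, 2, 5, 14]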
if __name__ == "__main__":
import doctest
doctest.testmod()
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[Any] = DiTPipeline
lowerCAmelCase__ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowerCAmelCase__ : Dict = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
lowerCAmelCase__ : List[str] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowerCAmelCase__ : Optional[Any] = False
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_UpperCAmelCase , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=_UpperCAmelCase , )
__lowercase = AutoencoderKL()
__lowercase = DDIMScheduler()
__lowercase = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str]=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(_UpperCAmelCase ).startswith('mps' ):
__lowercase = torch.manual_seed(_UpperCAmelCase )
else:
__lowercase = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__lowercase = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def a__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = 'cpu'
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowercase = self.get_dummy_inputs(_UpperCAmelCase )
__lowercase = pipe(**_UpperCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowercase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
__lowercase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_UpperCAmelCase , 1e-3 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(relax_max_difference=_UpperCAmelCase , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def a__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
def a__ ( self : str ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase = torch.manual_seed(0 )
__lowercase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__lowercase = ['vase', 'umbrella', 'white shark', 'white wolf']
__lowercase = pipe.get_label_ids(_UpperCAmelCase )
__lowercase = pipe(_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-2
def a__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__lowercase = ['vase', 'umbrella']
__lowercase = pipe.get_label_ids(_UpperCAmelCase )
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-1
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    parser = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
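# Once the package is installed, the same entry point is exposed as a console
# script, e.g.:
#
#   diffusers-cli env    # runs EnvironmentCommand and prints environment info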
def pancake_sort(arr: list) -> list:
    """Sort `arr` in ascending order using prefix reversals (pancake flips)."""
    cur = len(arr )
    while cur > 1:
        # Find the index of the maximum number in the unsorted prefix arr[0:cur]
        mi = arr.index(max(arr[0:cur] ) )
        # Flip it to the front: reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Flip the whole unsorted prefix, moving the maximum to position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
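# Sanity check for pancake_sort: each pass uses at most two prefix flips, so a
# list of length n is sorted with at most 2 * (n - 1) flips.
assert pancake_sort([3, 1, 2]) == [1, 2, 3]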
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = input("""Enter numbers separated by a comma:\n""").strip()
SCREAMING_SNAKE_CASE__ = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowercase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : str , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = 'UNwant\u00E9d,running'
__lowercase = 'unwanted, running'
return input_text, output_text
def a__ ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : str ) -> Dict:
"""simple docstring"""
__lowercase = BasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : Any ) -> int:
"""simple docstring"""
__lowercase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__lowercase = {}
for i, token in enumerate(_UpperCAmelCase ):
__lowercase = i
__lowercase = WordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowercase = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
__lowercase = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def a__ ( self : int ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowercase = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
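# Minimal greedy longest-match-first sketch of the WordPiece algorithm the
# tests above exercise. `_tiny_wordpiece` is illustrative, not the production
# tokenizer: it handles a single word and maps any unmatchable word to [UNK].
def _tiny_wordpiece(word, vocab, unk='[UNK]'):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        cur_piece = None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = '##' + piece  # continuation pieces carry the ## prefix
            if piece in vocab:
                cur_piece = piece
                break
            end -= 1
        if cur_piece is None:
            return [unk]  # whole word becomes [UNK], as in the tests above
        pieces.append(cur_piece)
        start = end
    return pieces


assert _tiny_wordpiece('unwanted', {'un', '##want', '##ed'}) == ['un', '##want', '##ed']
assert _tiny_wordpiece('unwantedX', {'un', '##want', '##ed'}) == ['[UNK]']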
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Optional[Any] , _UpperCAmelCase : Any=None , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__lowercase = kwargs.pop('feature_extractor' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , _UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : Dict , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
__lowercase = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowercase = features['words']
__lowercase = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
__lowercase = features.pop('pixel_values' )
if return_overflowing_tokens is True:
__lowercase = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'] )
__lowercase = images
return encoded_inputs
def a__ ( self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
images_with_overflow = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f""" {len(_UpperCAmelCase )} and {len(_UpperCAmelCase )}""" )
return images_with_overflow
def a__ ( self : Dict , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def a__ ( self : str ) -> Dict:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
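# Hedged usage sketch for the processor class above; the checkpoint id and the
# image path are assumptions for illustration, not taken from this file, and
# apply_ocr=True requires an OCR backend (pytesseract) to be installed.
from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained('microsoft/layoutxlm-base')
image = Image.open('document.png').convert('RGB')
# With apply_ocr=True (the image processor default) words and boxes come from
# OCR, so only the image is passed; the encoding exposes input_ids, bbox,
# attention_mask and image, matching model_input_names above.
encoding = processor(image, return_tensors='pt')
print(encoding.keys())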
| 711 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
SCREAMING_SNAKE_CASE__ = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""jukebox""": 512,
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
lowerCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : Optional[Any] = PRETRAINED_LYRIC_TOKENS_SIZES
lowerCAmelCase__ : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=["v3", "v2", "v2"] , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Union[str, Any]="<|endoftext|>" , **_UpperCAmelCase : Tuple , ) -> List[Any]:
"""simple docstring"""
__lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
super().__init__(
unk_token=_UpperCAmelCase , n_genres=_UpperCAmelCase , version=_UpperCAmelCase , max_n_lyric_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = version
__lowercase = max_n_lyric_tokens
__lowercase = n_genres
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__lowercase = json.load(_UpperCAmelCase )
__lowercase = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
# In v2 the character vocabulary had n_vocab=80; in v3 the '+' character was dropped, so n_vocab=79.
if len(self.lyrics_encoder ) == 79:
__lowercase = oov.replace(R'\-\'' , R'\-+\'' )
__lowercase = regex.compile(_UpperCAmelCase )
__lowercase = {v: k for k, v in self.artists_encoder.items()}
__lowercase = {v: k for k, v in self.genres_encoder.items()}
__lowercase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = [self.artists_encoder.get(_UpperCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(_UpperCAmelCase ) ):
__lowercase = [self.genres_encoder.get(_UpperCAmelCase , 0 ) for genre in list_genres[genres]]
__lowercase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
__lowercase = [[self.lyrics_encoder.get(_UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def a__ ( self : str , _UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
return list(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase = self.prepare_for_tokenization(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = self._tokenize(_UpperCAmelCase )
return artist, genre, lyrics
def a__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
__lowercase = artists[idx].lower()
__lowercase = [genres[idx].lower()]
else:
__lowercase = self._normalize(artists[idx] ) + '.v2'
__lowercase = [
self._normalize(_UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
__lowercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
__lowercase = {vocab[index]: index + 1 for index in range(len(_UpperCAmelCase ) )}
__lowercase = 0
__lowercase = len(_UpperCAmelCase ) + 1
__lowercase = self.vocab
__lowercase = {v: k for k, v in self.vocab.items()}
__lowercase = ''
else:
__lowercase = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
__lowercase = self._run_strip_accents(_UpperCAmelCase )
__lowercase = lyrics.replace('\\' , '\n' )
__lowercase = self.out_of_vocab.sub('' , _UpperCAmelCase ), [], []
return artists, genres, lyrics
def a__ ( self : Tuple , _UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = unicodedata.normalize('NFD' , _UpperCAmelCase )
__lowercase = []
for char in text:
__lowercase = unicodedata.category(_UpperCAmelCase )
if cat == "Mn":
continue
output.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : str ) -> str:
"""simple docstring"""
__lowercase = (
[chr(_UpperCAmelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(_UpperCAmelCase ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
__lowercase = frozenset(_UpperCAmelCase )
__lowercase = re.compile(R'_+' )
__lowercase = ''.join([c if c in accepted else '_' for c in text.lower()] )
__lowercase = pattern.sub('_' , _UpperCAmelCase ).strip('_' )
return text
def a__ ( self : List[str] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
return " ".join(_UpperCAmelCase )
def a__ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = False ) -> int:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = TensorType(_UpperCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
__lowercase = tf.constant
__lowercase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
__lowercase = torch.tensor
__lowercase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
__lowercase = jnp.array
__lowercase = _is_jax
else:
__lowercase = np.asarray
__lowercase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
__lowercase = [inputs]
if not is_tensor(_UpperCAmelCase ):
__lowercase = as_tensor(_UpperCAmelCase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int="" , _UpperCAmelCase : Tuple="pt" ) -> BatchEncoding:
"""simple docstring"""
__lowercase = [0, 0, 0]
__lowercase = [artist] * len(self.version )
__lowercase = [genres] * len(self.version )
__lowercase , __lowercase , __lowercase = self.tokenize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase , __lowercase , __lowercase = self._convert_token_to_id(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = [-INFINITY] * len(full_tokens[-1] )
__lowercase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_UpperCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def a__ ( self : int , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(save_directory ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
artists_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(artists_file , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=False ) )
genres_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(genres_file , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=False ) )
lyrics_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(lyrics_file , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=False ) )
return (artists_file, genres_file, lyrics_file)
def a__ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.artists_decoder.get(_UpperCAmelCase )
__lowercase = [self.genres_decoder.get(_UpperCAmelCase ) for genre in genres_index]
__lowercase = [self.lyrics_decoder.get(_UpperCAmelCase ) for character in lyric_index]
return artist, genres, lyrics
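# Hedged usage sketch for the tokenizer class above (it mirrors transformers'
# JukeboxTokenizer, so the released class is used here; the checkpoint id is an
# assumption for illustration).
from transformers import JukeboxTokenizer

tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
# __call__ builds one (artist, genre, lyrics) sequence per model version and
# returns them under 'input_ids' with matching 'attention_masks'.
encoding = tokenizer('Alan Jackson', 'Country Rock', 'old town road', return_tensors='pt')
print(len(encoding['input_ids']))  # one tensor per entry in tokenizer.version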
| 688 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 712 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : List[Any]=36 , _UpperCAmelCase : Optional[Any]=6 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Any=6 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=5_12 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Any=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = embedding_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_hidden_groups
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = AlbertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
__lowercase = AlbertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , sentence_order_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def a__ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AlbertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = AlbertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = AlbertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = AlbertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Optional[Any] = True
def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int=False ) -> Tuple:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def a__ ( self : str ) -> str:
"""simple docstring"""
__lowercase = AlbertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
@slow
def a__ ( self : int ) -> Any:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = AlbertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = AlbertModel.from_pretrained('albert-base-v2' )
__lowercase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__lowercase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
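# Hedged standalone sketch of the integration check above; the checkpoint, the
# input ids, the attention mask and the expected shape are taken from the test itself.
import torch
from transformers import AlbertModel

model = AlbertModel.from_pretrained('albert-base-v2')
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
    last_hidden_state = model(input_ids, attention_mask=attention_mask)[0]
print(last_hidden_state.shape)  # torch.Size([1, 11, 768])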
| 688 | 0 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class A__ ( unittest.TestCase ):
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ['a', 'b', 'c']
# Defaults to last layer if both are None
__lowercase , __lowercase = get_aligned_output_features_output_indices(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ['c'] )
self.assertEqual(_UpperCAmelCase , [2] )
# Out indices set to match out features
__lowercase , __lowercase = get_aligned_output_features_output_indices(['a', 'c'] , _UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ['a', 'c'] )
self.assertEqual(_UpperCAmelCase , [0, 2] )
# Out features set to match out indices
__lowercase , __lowercase = get_aligned_output_features_output_indices(_UpperCAmelCase , [0, 2] , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ['a', 'c'] )
self.assertEqual(_UpperCAmelCase , [0, 2] )
# Out features selected from negative indices
__lowercase , __lowercase = get_aligned_output_features_output_indices(_UpperCAmelCase , [-3, -1] , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ['a', 'c'] )
self.assertEqual(_UpperCAmelCase , [-3, -1] )
def a__ ( self : Optional[int] ) -> str:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , _UpperCAmelCase )
# Out features must be a list
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] )
# Out features must be a subset of stage names
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] )
# Out indices must be a list or tuple
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(_UpperCAmelCase , 0 , ['a', 'b'] )
# Out indices must be a subset of stage names
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(_UpperCAmelCase , (0, 1) , ['a'] )
# Out features and out indices must be the same length
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] )
# Out features should match out indices
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] )
# Out features and out indices should be in order
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] )
# Check passes with valid inputs
verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] )
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
backbone = BackboneMixin()
backbone.stage_names = ['a', 'b', 'c']
backbone._out_features = ['a', 'c']
backbone._out_indices = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
backbone.out_features = ['a', 'b']
self.assertEqual(backbone.out_features , ['a', 'b'] )
self.assertEqual(backbone.out_indices , [0, 1] )
backbone.out_indices = [-3, -1]
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [-3, -1] )
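# Hedged sketch restating the alignment rule the tests above exercise: a missing
# side is filled in from the other, and when both are None the last stage is
# used. Stage names here are illustrative.
from transformers.utils.backbone_utils import get_aligned_output_features_output_indices

stage_names = ['stem', 'stage1', 'stage2', 'stage3']
features, indices = get_aligned_output_features_output_indices(None, None, stage_names)
print(features, indices)  # ['stage3'] [3]
features, indices = get_aligned_output_features_output_indices(['stage1', 'stage3'], None, stage_names)
print(features, indices)  # ['stage1', 'stage3'] [1, 3]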
| 713 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A__ ( lowerCAmelCase__ ):
def __init__( self : List[str] , _UpperCAmelCase : str = "▁" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[str, AddedToken] = "<unk>" , _UpperCAmelCase : Union[str, AddedToken] = "</s>" , _UpperCAmelCase : Union[str, AddedToken] = "<pad>" , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
__lowercase = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
__lowercase = token_dict['token']
__lowercase = Tokenizer(Unigram() )
__lowercase = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
__lowercase = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ),
pre_tokenizers.Digits(individual_digits=_UpperCAmelCase ),
pre_tokenizers.Punctuation(),
] )
__lowercase = decoders.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__lowercase = TemplateProcessing(
single=f"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
__lowercase = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : str , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> str:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [files]
self._tokenizer.train(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : int , _UpperCAmelCase : Union[Iterator[str], Iterator[Iterator[str]]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ) -> Dict:
"""simple docstring"""
__lowercase = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
self._tokenizer.train_from_iterator(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = json.loads(self._tokenizer.to_str() )
__lowercase = self.special_tokens['unk']['id']
__lowercase = Tokenizer.from_str(json.dumps(_UpperCAmelCase ) )
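# Hedged usage sketch: the class above mirrors tokenizers'
# SentencePieceUnigramTokenizer, so the released implementation is used here;
# the corpus and vocab size are illustrative.
from tokenizers.implementations import SentencePieceUnigramTokenizer

tokenizer = SentencePieceUnigramTokenizer()
corpus = ['this is a tiny corpus', 'unigram models pick subwords by likelihood']
tokenizer.train_from_iterator(corpus, vocab_size=100, show_progress=False)
print(tokenizer.encode('tiny corpus').tokens)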
| 688 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='utf-8' , check=_UpperCAmelCase , )
assert hasattr(self , 'env' )
def a__ ( self : Optional[Any] , _UpperCAmelCase : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
__lowercase = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_UpperCAmelCase , instance_count=_UpperCAmelCase , instance_type=self.instance_type , debugger_hook_config=_UpperCAmelCase , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_UpperCAmelCase , py_version='py36' , )
def a__ ( self : List[Any] , _UpperCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
TrainingJobAnalytics(_UpperCAmelCase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def a__ ( self : Optional[int] , _UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.create_estimator(_UpperCAmelCase )
# run training
estimator.fit()
# result dataframe
result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job; this includes starting, preprocessing and stopping
train_runtime = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , _UpperCAmelCase )
| 714 |
import string
from math import logaa
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> int:
__lowercase = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
__lowercase = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> tuple[int, int]:
__lowercase = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
__lowercase = corpus_without_punctuation.split('\n' )
__lowercase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(SCREAMING_SNAKE_CASE ))
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str]=False ) -> float:
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> float:
return round(tf * idf , 3 )
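# Worked example mirroring the three helpers above (the obfuscated source reuses
# one function name, so the arithmetic is inlined; corpus and numbers are
# illustrative).
from math import log10

corpus = 'the cat sat\nthe dog sat\nthe cat ran'
docs = corpus.lower().split('\n')
tf = 'the cat sat the cat ran'.split(' ').count('cat')  # term frequency -> 2
df = len([doc for doc in docs if 'cat' in doc])         # document frequency -> 2
n = len(docs)                                           # corpus size -> 3
idf = round(log10(n / df), 3)                           # unsmoothed idf -> 0.176
print(tf, df, n, idf, round(tf * idf, 3))               # 2 2 3 0.176 0.352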
| 688 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
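# Hedged sketch of the lazy-import idea behind _LazyModule above: attribute
# access triggers the real import, keeping 'import transformers' cheap. This is
# a minimal stand-in, not the actual _LazyModule implementation.
import importlib
import types

class LazyStub(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule path: [exported names]} into {name: submodule path}
        self._name_to_module = {
            exported: module for module, names in import_structure.items() for exported in names
        }

    def __getattr__(self, name):
        # only called when normal attribute lookup fails
        if name not in self._name_to_module:
            raise AttributeError(name)
        value = getattr(importlib.import_module(self._name_to_module[name]), name)
        setattr(self, name, value)  # cache so the import runs only once
        return value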
| 715 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE__ = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "retribert"
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=3_05_22 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : List[Any]=8 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Union[str, Any]=30_72 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=5_12 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : List[Any]=1e-1_2 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[Any]=1_28 , _UpperCAmelCase : Optional[int]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = share_encoders
__lowercase = projection_dim
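# Hedged usage sketch, assuming the config class above is exported under its
# conventional name RetriBertConfig:
from transformers import RetriBertConfig

config = RetriBertConfig()
print(config.model_type, config.hidden_size, config.projection_dim)  # retribert 768 128
print(sorted(config.to_dict())[:3])  # serializable like any PretrainedConfig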
| 688 | 0 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = LxmertTokenizer
lowerCAmelCase__ : Dict = LxmertTokenizerFast
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : str = True
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
__lowercase = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : int , _UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = 'UNwant\u00E9d,running'
__lowercase = 'unwanted, running'
return input_text, output_text
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] )
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = 'I was born in 92000, and this is falsé.'
__lowercase = tokenizer.tokenize(_UpperCAmelCase )
__lowercase = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__lowercase = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(_UpperCAmelCase )
__lowercase = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
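# Hedged sketch of the greedy longest-match-first WordPiece idea the tests above
# exercise (a simplified stand-in, not the transformers implementation):
def wordpiece(word, vocab):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else '##' + word[start:end]
            if piece in vocab:
                tokens.append(piece)
                break
            end -= 1
        else:
            return ['[UNK]']  # no piece matched at this position
        start = end
    return tokens

vocab = {'un', '##want', '##ed', 'runn', '##ing'}
assert wordpiece('unwanted', vocab) == ['un', '##want', '##ed']
assert wordpiece('running', vocab) == ['runn', '##ing']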
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 688 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys( config , base_model=False ):
rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
for i in range(config.num_hidden_layers ):
if base_model:
prefix = ''
else:
prefix = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
in_proj_bias = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
: config.hidden_size, :
]
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
-config.hidden_size :, :
]
state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
ignore_keys = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(k , None )
def remove_projection_head( state_dict ):
# The projection head is only used during MSN's self-supervised pre-training;
# it is not needed for downstream tasks, so its weights are dropped here.
ignore_keys = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
state_dict.pop(k , None )
def rename_key( dct , old , new ):
val = dct.pop(old )
dct[new] = val
def convert_vit_msn_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
config = ViTMSNConfig()
config.num_labels = 1000
repo_id = 'datasets/huggingface/label-files'
filename = 'imagenet-1k-id2label.json'
idalabel = json.load(open(hf_hub_download(repo_id , filename ) , 'r' ) )
idalabel = {int(k ): v for k, v in idalabel.items()}
config.idalabel = idalabel
config.labelaid = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
config.hidden_size = 384
config.intermediate_size = 1536
config.num_attention_heads = 6
elif "l16" in checkpoint_url:
config.hidden_size = 1024
config.intermediate_size = 4096
config.num_hidden_layers = 24
config.num_attention_heads = 16
config.hidden_dropout_prob = 0.1
elif "b4" in checkpoint_url:
config.patch_size = 4
elif "l7" in checkpoint_url:
config.patch_size = 7
config.hidden_size = 1024
config.intermediate_size = 4096
config.num_hidden_layers = 24
config.num_attention_heads = 16
config.hidden_dropout_prob = 0.1
model = ViTMSNModel(config )
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )['target_encoder']
image_processor = ViTImageProcessor(size=config.image_size )
remove_projection_head(state_dict )
rename_keys = create_rename_keys(config , base_model=True )
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
read_in_q_k_v(state_dict , config , base_model=True )
model.load_state_dict(state_dict )
model.eval()
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url , stream=True ).raw )
image_processor = ViTImageProcessor(
size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
inputs = image_processor(images=image , return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
outputs = model(**inputs )
last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
expected_slice = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
expected_slice = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
expected_slice = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
expected_slice = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
expected_slice = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1E-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(pytorch_dump_folder_path )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
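# Example invocation of the conversion script above (hedged; the script filename
# and output path are illustrative):
#
#   python convert_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small
#
# The script renames timm-style keys, splits the fused qkv projection into
# separate query/key/value weights, drops the self-supervised projection head,
# and checks a fixed slice of the last hidden state before saving the model and
# image processor.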
| 717 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = ["image_processor", "tokenizer"]
lowerCAmelCase__ : Union[str, Any] = "LayoutLMv2ImageProcessor"
lowerCAmelCase__ : Union[str, Any] = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Optional[Any] , _UpperCAmelCase : Any=None , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__lowercase = kwargs.pop('feature_extractor' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , _UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : Dict , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
__lowercase = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowercase = features['words']
__lowercase = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
__lowercase = features.pop('pixel_values' )
if return_overflowing_tokens is True:
__lowercase = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs['overflow_to_sample_mapping'] )
__lowercase = images
return encoded_inputs
def a__ ( self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
__lowercase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f""" {len(_UpperCAmelCase )} and {len(_UpperCAmelCase )}""" )
return images_with_overflow
def a__ ( self : Dict , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def a__ ( self : str ) -> Dict:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
| 688 | 0 |
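# The block below appears to mirror diffusers' `pipelines/stable_diffusion/__init__.py`:
# it declares the pipeline output dataclasses and gates each pipeline import behind
# the optional dependencies it needs (torch, transformers, k-diffusion, onnx, flax).
# Identifiers follow this dump's obfuscation scheme, e.g. `imgaimg` stands in for
# `img2img`, `pixapix` for `pix2pix`, and `depthaimg` for `depth2img`.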
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[List[PIL.Image.Image], np.ndarray]
lowerCAmelCase__ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : np.ndarray
lowerCAmelCase__ : List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 718 |
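# The block below appears to be the `datasets` library's dummy-data manager
# (`MockDownloadManager`): in tests it resolves real download URLs to files
# inside a local or hub-hosted dummy_data.zip instead of hitting the network.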
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class A__ :
lowerCAmelCase__ : Optional[int] = "dummy_data"
lowerCAmelCase__ : str = "datasets"
lowerCAmelCase__ : Dict = False
def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[Version, str] , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[List[Callable]] = None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 0
__lowercase = dataset_name
__lowercase = cache_dir
__lowercase = use_local_dummy_data
__lowercase = config
# download_callbacks take a single url as input
__lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowercase = str(_UpperCAmelCase )
# to be downloaded
__lowercase = None
__lowercase = None
@property
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
if self._dummy_file is None:
__lowercase = self.download_dummy_data()
return self._dummy_file
@property
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowercase = cached_path(
_UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=_UpperCAmelCase , force_extract=_UpperCAmelCase )
return os.path.join(_UpperCAmelCase , self.dummy_file_name )
@property
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if self._bucket_url is None:
__lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[str] , *_UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return self.create_dummy_data_dict(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return self.create_dummy_data_list(_UpperCAmelCase , _UpperCAmelCase )
else:
return self.create_dummy_data_single(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : Tuple , *_UpperCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , *_UpperCAmelCase : str , **_UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
return path
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return {}
def a__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for single_url in single_urls:
download_callback(_UpperCAmelCase )
else:
__lowercase = single_urls
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) ) for x in single_urls]
else:
__lowercase = single_urls
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) )
__lowercase = value
# make sure that values are unique
if all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , _UpperCAmelCase ) ) for url in data_url )
__lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowercase = [data_url[0]] * len(_UpperCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(_UpperCAmelCase )
return dummy_data_list
def a__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(_UpperCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def a__ ( self : int ) -> str:
"""simple docstring"""
pass
def a__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
def _iter_archive_members(_UpperCAmelCase : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
__lowercase = Path(self.dummy_file ).parent
__lowercase = path.relative_to(_UpperCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_UpperCAmelCase )
__lowercase = Path(_UpperCAmelCase )
__lowercase = _iter_archive_members(_UpperCAmelCase ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(_UpperCAmelCase ).as_posix(), file_path.open('rb' )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [paths]
for path in paths:
if os.path.isfile(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(_UpperCAmelCase ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase )
| 688 | 0 |
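# The script below appears to be transformers' FLAVA checkpoint converter: it
# renames keys from the original facebookresearch checkpoint to the HF
# `FlavaForPreTraining` layout, merges in the DALL-E codebook weights, and
# sanity-checks the parameter sums before saving.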
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Tuple:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ) -> int:
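    # Rewrites checkpoint keys from the original FLAVA module names to the HF
    # module names, casts the weights to float32, and appends the codebook weights.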
__lowercase = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
__lowercase = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' )
__lowercase = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' )
__lowercase = key.replace('heads.cmd.itm_head.cls' , 'itm_head' )
__lowercase = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' )
__lowercase = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' )
__lowercase = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' )
__lowercase = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' )
__lowercase = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' )
__lowercase = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' )
__lowercase = key.replace('image_encoder.module' , 'flava.image_model' )
__lowercase = key.replace('text_encoder.module' , 'flava.text_model' )
__lowercase = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' )
__lowercase = key.replace('mm_encoder.module' , 'flava.multimodal_model' )
__lowercase = key.replace('text_projection' , 'flava.text_projection' )
__lowercase = key.replace('image_projection' , 'flava.image_projection' )
__lowercase = value.float()
for key, value in codebook_state_dict.items():
__lowercase = value
return upgrade
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int=None ) -> List[Any]:
if config_path is not None:
__lowercase = FlavaConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
__lowercase = FlavaConfig()
__lowercase = FlavaForPreTraining(SCREAMING_SNAKE_CASE ).eval()
__lowercase = convert_dalle_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , save_checkpoint=SCREAMING_SNAKE_CASE )
if os.path.exists(SCREAMING_SNAKE_CASE ):
__lowercase = torch.load(SCREAMING_SNAKE_CASE , map_location='cpu' )
else:
__lowercase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )
__lowercase = upgrade_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_model.load_state_dict(SCREAMING_SNAKE_CASE )
__lowercase = hf_model.state_dict()
__lowercase = count_parameters(SCREAMING_SNAKE_CASE )
__lowercase = count_parameters(SCREAMING_SNAKE_CASE ) + count_parameters(SCREAMING_SNAKE_CASE )
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 719 |
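# The block below is a from-scratch bilateral-filter demo (it appears to come
# from TheAlgorithms/Python): each output pixel is a Gaussian-weighted average
# over a local window, weighted by both spatial distance and intensity
# difference. Note the dump's obfuscation: `cva` stands in for `cv2` and
# `np.uinta` for `np.uint8`, so the snippet is not runnable verbatim.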
import math
import sys
import cva
import numpy as np
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float ) -> np.ndarray:
# For applying gaussian function for each element in matrix.
__lowercase = math.sqrt(SCREAMING_SNAKE_CASE )
__lowercase = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> np.ndarray:
__lowercase = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float ) -> np.ndarray:
# Creates a gaussian kernel of given dimension.
__lowercase = np.zeros((kernel_size, kernel_size) )
for i in range(0 , SCREAMING_SNAKE_CASE ):
for j in range(0 , SCREAMING_SNAKE_CASE ):
__lowercase = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int , ) -> np.ndarray:
__lowercase = np.zeros(img.shape )
__lowercase = get_gauss_kernel(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase , __lowercase = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__lowercase = get_slice(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = img_s - img_s[kernel_size // 2, kernel_size // 2]
__lowercase = vec_gaussian(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = np.multiply(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = np.multiply(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = np.sum(SCREAMING_SNAKE_CASE ) / np.sum(SCREAMING_SNAKE_CASE )
__lowercase = val
return imga
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list ) -> tuple:
__lowercase = args[1] if args[1:] else '../image_data/lena.jpg'
__lowercase = float(args[2] ) if args[2:] else 1.0
__lowercase = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__lowercase = int(args[4] )
__lowercase = kernel_size + abs(kernel_size % 2 - 1 )
else:
__lowercase = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = parse_args(sys.argv)
SCREAMING_SNAKE_CASE__ = cva.imread(filename, 0)
cva.imshow("""input image""", img)
SCREAMING_SNAKE_CASE__ = img / 255
SCREAMING_SNAKE_CASE__ = out.astype("""float32""")
SCREAMING_SNAKE_CASE__ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
SCREAMING_SNAKE_CASE__ = out * 255
SCREAMING_SNAKE_CASE__ = np.uinta(out)
cva.imshow("""output image""", out)
cva.waitKey(0)
cva.destroyAllWindows()
| 688 | 0 |
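# The block below implements jump search over a sorted list: skip ahead in
# sqrt(n)-sized blocks until the block that may contain x is found, then scan
# that block linearly, for O(sqrt(n)) comparisons overall. Worked example
# (hypothetical input):
#   jump_search([1, 3, 5, 7, 9], 7) -> 3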
import math
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : int ) -> int:
__lowercase = len(SCREAMING_SNAKE_CASE )
__lowercase = int(math.floor(math.sqrt(SCREAMING_SNAKE_CASE ) ) )
__lowercase = 0
while arr[min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) - 1] < x:
__lowercase = step
step += int(math.floor(math.sqrt(SCREAMING_SNAKE_CASE ) ) )
if prev >= n:
return -1
while arr[prev] < x:
__lowercase = prev + 1
if prev == min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = input("""Enter numbers separated by a comma:\n""").strip()
SCREAMING_SNAKE_CASE__ = [int(item) for item in user_input.split(""",""")]
SCREAMING_SNAKE_CASE__ = int(input("""Enter the number to be searched:\n"""))
SCREAMING_SNAKE_CASE__ = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
    print(F'''Number {x} is at index {res}''')
| 720 |
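# The block below appears to be transformers' LayoutLMv3 image-processor test
# suite: it checks the processor's attributes, PIL/NumPy/PyTorch batching
# behavior, and the Tesseract OCR words/boxes produced for a fixture document.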
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=3 , _UpperCAmelCase : Any=18 , _UpperCAmelCase : Dict=30 , _UpperCAmelCase : Tuple=4_00 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Any=True , ) -> Dict:
"""simple docstring"""
__lowercase = size if size is not None else {'height': 18, 'width': 18}
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size
__lowercase = apply_ocr
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : int = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessingTester(self )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'apply_ocr' ) )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__lowercase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
pass
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _UpperCAmelCase )
self.assertIsInstance(encoding.boxes , _UpperCAmelCase )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def a__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = LayoutLMvaImageProcessor()
from datasets import load_dataset
__lowercase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__lowercase = Image.open(ds[0]['file'] ).convert('RGB' )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__lowercase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__lowercase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCAmelCase )
self.assertListEqual(encoding.boxes , _UpperCAmelCase )
# with apply_OCR = False
__lowercase = LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase )
__lowercase = image_processing(_UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 688 | 0 |
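# The block below appears to be diffusers' KandinskyV22ControlnetPipeline: a
# decoder pipeline that denoises MoVQ latents conditioned on CLIP image
# embeddings plus a ControlNet-style hint (e.g. a depth map), then decodes the
# latents with the MoVQ autoencoder.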
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE__ = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
    ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple=8 ) -> Tuple:
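    # Latent sizing helper: divide height/width by scale_factor**2, rounding up,
    # then rescale by scale_factor. In the de-obfuscated source the two
    # `__lowercase` assignments bind `new_height` and `new_width`, which the
    # `+= 1` lines and the return statement refer to.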
__lowercase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__lowercase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class A__ ( lowerCAmelCase__ ):
def __init__( self : Union[str, Any] , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : DDPMScheduler , _UpperCAmelCase : VQModel , ) -> int:
"""simple docstring"""
super().__init__()
self.register_modules(
unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , movq=_UpperCAmelCase , )
__lowercase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def a__ ( self : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
if latents is None:
__lowercase = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
__lowercase = latents.to(_UpperCAmelCase )
__lowercase = latents * scheduler.init_noise_sigma
return latents
def a__ ( self : Tuple , _UpperCAmelCase : Optional[Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowercase = torch.device(f"""cuda:{gpu_id}""" )
__lowercase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Tuple , _UpperCAmelCase : Dict=0 ) -> Dict:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__lowercase = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_UpperCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowercase = None
for cpu_offloaded_model in [self.unet, self.movq]:
__lowercase , __lowercase = cpu_offload_with_hook(_UpperCAmelCase , _UpperCAmelCase , prev_module_hook=_UpperCAmelCase )
# We'll offload the last model manually.
__lowercase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCAmelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_UpperCAmelCase )
def __call__( self : Any , _UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , _UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : int = 5_12 , _UpperCAmelCase : int = 5_12 , _UpperCAmelCase : int = 1_00 , _UpperCAmelCase : float = 4.0 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self._execution_device
__lowercase = guidance_scale > 1.0
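        # Classifier-free guidance: the conditional and unconditional branches run
        # as one doubled batch and are recombined further down via
        # uncond + guidance_scale * (text - uncond).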
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = torch.cat(_UpperCAmelCase , dim=0 )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = torch.cat(_UpperCAmelCase , dim=0 )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = torch.cat(_UpperCAmelCase , dim=0 )
__lowercase = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__lowercase = image_embeds.repeat_interleave(_UpperCAmelCase , dim=0 )
__lowercase = negative_image_embeds.repeat_interleave(_UpperCAmelCase , dim=0 )
__lowercase = hint.repeat_interleave(_UpperCAmelCase , dim=0 )
__lowercase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCAmelCase )
__lowercase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCAmelCase )
self.scheduler.set_timesteps(_UpperCAmelCase , device=_UpperCAmelCase )
__lowercase = self.scheduler.timesteps
__lowercase = self.movq.config.latent_channels
__lowercase , __lowercase = downscale_height_and_width(_UpperCAmelCase , _UpperCAmelCase , self.movq_scale_factor )
# create initial latent
__lowercase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
__lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowercase = {'image_embeds': image_embeds, 'hint': hint}
__lowercase = self.unet(
sample=_UpperCAmelCase , timestep=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , added_cond_kwargs=_UpperCAmelCase , return_dict=_UpperCAmelCase , )[0]
if do_classifier_free_guidance:
__lowercase , __lowercase = noise_pred.split(latents.shape[1] , dim=1 )
__lowercase , __lowercase = noise_pred.chunk(2 )
__lowercase , __lowercase = variance_pred.chunk(2 )
__lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowercase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowercase , __lowercase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowercase = self.scheduler.step(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase , )[0]
# post-processing
__lowercase = self.movq.decode(_UpperCAmelCase , force_not_quantize=_UpperCAmelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
__lowercase = image * 0.5 + 0.5
__lowercase = image.clamp(0 , 1 )
__lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCAmelCase )
| 721 |
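# The block below appears to be transformers' UMT5 configuration: `UMT5Config`
# plus an ONNX export config that declares the encoder/decoder input axes and,
# when `use_past` is set, the cached key/value inputs.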
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[int] = "umt5"
lowerCAmelCase__ : Tuple = ["past_key_values"]
def __init__( self : str , _UpperCAmelCase : int=25_01_12 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : List[str]=64 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : str=8 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : str=32 , _UpperCAmelCase : Optional[int]=1_28 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : str=1e-6 , _UpperCAmelCase : Dict=1.0 , _UpperCAmelCase : str="gated-gelu" , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Tuple="T5Tokenizer" , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[str]=0 , _UpperCAmelCase : int=1 , _UpperCAmelCase : List[str]=0 , **_UpperCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
is_encoder_decoder=_UpperCAmelCase , tokenizer_class=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = vocab_size
__lowercase = d_model
__lowercase = d_kv
__lowercase = d_ff
__lowercase = num_layers
__lowercase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase = num_heads
__lowercase = relative_attention_num_buckets
__lowercase = relative_attention_max_distance
__lowercase = dropout_rate
__lowercase = layer_norm_epsilon
__lowercase = initializer_factor
__lowercase = feed_forward_proj
__lowercase = use_cache
__lowercase = self.feed_forward_proj.split('-' )
__lowercase = act_info[-1]
__lowercase = act_info[0] == 'gated'
if len(_UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(_UpperCAmelCase ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__lowercase = 'gelu_new'
@property
def a__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self.d_model
@property
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.num_heads
@property
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self.num_layers
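# A minimal usage sketch against the real `transformers` package (the names
# below are the upstream ones, not this dump's obfuscated aliases):
#   from transformers import UMT5Config, UMT5ForConditionalGeneration
#   config = UMT5Config(d_model=512, num_layers=8, feed_forward_proj="gated-gelu")
#   model = UMT5ForConditionalGeneration(config)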
class A__ ( lowerCAmelCase__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def a__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__lowercase = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__lowercase = 'past_encoder_sequence + sequence'
__lowercase = {0: 'batch'}
__lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
__lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
return 13
@property
def a__ ( self : Dict ) -> float:
"""simple docstring"""
return 5e-4
| 688 | 0 |
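# The block below appears to be transformers' repo utility
# `utils/check_config_attributes.py`: for every configuration class it verifies
# that each __init__ argument is actually read somewhere in the corresponding
# modeling files, modulo an explicit allow-list of special cases.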
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__a = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__a = direct_transformers_import(PATH_TO_TRANSFORMERS)
__a = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__a = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def UpperCamelCase_ ( a_ , a_ , a_ , a_ ) ->List[Any]:
A =False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
A =True
# Deal with multi-line cases
elif (
re.search(
Rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , a_ , )
is not None
):
A =True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
A =True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
A =[
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
A =["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
A =True
if not attribute_used:
A =False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
A =True
elif attribute in ["tie_word_embeddings"] and default_value is False:
A =True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
A =True
elif attribute.endswith("_token_id" ):
A =True
# configuration class specific cases
if not case_allowed:
A =SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
A =allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def UpperCamelCase_ ( a_ ) ->Union[str, Any]:
A =dict(inspect.signature(config_class.__init__ ).parameters )
A =[x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
A =[signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
A ={}
if len(config_class.attribute_map ) > 0:
A ={v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
A =inspect.getsourcefile(a_ )
A =os.path.dirname(a_ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
A =[os.path.join(a_ , a_ ) for fn in os.listdir(a_ ) if fn.startswith("modeling_" )]
# Get the source code strings
A =[]
for path in modeling_paths:
if os.path.isfile(a_ ):
with open(a_ ) as fp:
modeling_sources.append(fp.read() )
A =[]
for config_param, default_value in zip(a_ , a_ ):
# `attributes` here is all the variant names for `config_param`
A =[config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
unused_attributes.append(attributes[0] )
return sorted(a_ )
def UpperCamelCase_ ( ) ->List[Any]:
A ={}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
A =[
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda a_ : inspect.isclass(a_ )
and issubclass(a_ , PretrainedConfig )
and inspect.getmodule(a_ ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
A =check_config_attributes_being_used(a_ )
if len(a_ ) > 0:
A =unused_attributes
if len(a_ ) > 0:
A ="The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += f'''{name}: {attributes}\n'''
raise ValueError(a_ )
if __name__ == "__main__":
check_config_attributes()
| 689 |
def UpperCamelCase_ ( a_ , a_ ) ->list[int]:
A =int(a_ )
# Initialize Result
A =[]
# Traverse through all denomination
for denomination in reversed(a_ ):
# Find denominations
while int(total_value ) >= int(denomination ):
total_value -= int(denomination )
answer.append(denomination ) # Append the "answers" array
return answer
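# A small illustrative run (assumed input, separate from the driver below) of the greedy
# strategy above, picking the largest denomination first:
#
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#   # -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]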
# Driver Code
if __name__ == "__main__":
__a = []
__a = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
__a = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
__a = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
__a = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
__a = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'''Following is minimal change for {value}: ''')
__a = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 689 | 1 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 689 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = ["""model.decoder.embed_positions.weights"""]
def UpperCamelCase_ ( a_ ) ->List[str]:
if "emb" in name:
A =name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
A =name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
A =name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
A =name.replace("linear1" , "fc1" )
if "linear2" in name:
A =name.replace("linear2" , "fc2" )
if "norm1" in name:
A =name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
A =name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
A =name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
A =name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
A =name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
A =name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def UpperCamelCase_ ( a_ , a_ ) ->Tuple[Dict, Dict]:
A =list(state_dict.keys() )
A ={}
for key in keys:
A =state_dict.pop(a_ )
A =rename_keys(a_ )
if "in_proj_weight" in key:
# split fused qkv proj
A =val[:hidden_size, :]
A =val[hidden_size : 2 * hidden_size, :]
A =val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
A =val
else:
A =val
return state_dict, enc_dec_proj_state_dict
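# A minimal sketch (assumed shapes, illustrative only) of the fused-QKV split above:
# a fused `in_proj_weight` of shape (3 * hidden_size, hidden_size) is cut into three
# equal blocks for the query, key and value projections.
#
#   import torch
#   hidden_size = 4
#   fused = torch.randn(3 * hidden_size, hidden_size)
#   q = fused[:hidden_size, :]
#   k = fused[hidden_size : 2 * hidden_size, :]
#   v = fused[-hidden_size:, :]
#   assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)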
def UpperCamelCase_ ( a_ ) ->MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
A =1024
A =24
A =16
elif checkpoint == "medium":
A =1536
A =48
A =24
elif checkpoint == "large":
A =2048
A =48
A =32
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
A =MusicgenDecoderConfig(
hidden_size=a_ , ffn_dim=hidden_size * 4 , num_hidden_layers=a_ , num_attention_heads=a_ , )
return config
@torch.no_grad()
def UpperCamelCase_ ( a_ , a_=None , a_=None , a_="cpu" ) ->Union[str, Any]:
A =MusicGen.get_pretrained(a_ , device=a_ )
A =decoder_config_from_checkpoint(a_ )
A =fairseq_model.lm.state_dict()
A , A =rename_state_dict(
a_ , hidden_size=decoder_config.hidden_size )
A =T5EncoderModel.from_pretrained("t5-base" )
A =EncodecModel.from_pretrained("facebook/encodec_32khz" )
A =MusicgenForCausalLM(a_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
A , A =decoder.load_state_dict(a_ , strict=a_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(a_ )
if len(a_ ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(a_ ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
A =MusicgenForConditionalGeneration(text_encoder=a_ , audio_encoder=a_ , decoder=a_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(a_ )
# check we can do a forward pass
A =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
A =input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
A =model(input_ids=a_ , decoder_input_ids=a_ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
A =AutoTokenizer.from_pretrained("t5-base" )
A =AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
A =MusicgenProcessor(feature_extractor=a_ , tokenizer=a_ )
# set the appropriate bos/pad token ids
A =2048
A =2048
# set other default generation config params
A =int(30 * audio_encoder.config.frame_rate )
A =True
A =3.0
if pytorch_dump_folder is not None:
Path(a_ ).mkdir(exist_ok=a_ )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(a_ )
processor.save_pretrained(a_ )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(a_ )
processor.push_to_hub(a_ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
__a = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 689 | 1 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
__a = logging.get_logger(__name__)
__a = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCamelCase__:
"""simple docstring"""
_A = field(
default=lowerCAmelCase__ , metadata={"help": "Model type selected in the list: " + ", ".join(lowerCAmelCase__ )} )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
_A = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_A = field(
default=1_2_8 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
_A = field(
default=6_4 , metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
} , )
_A = field(
default=3_0 , metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
} , )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
_A = field(
default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
_A = field(
default=2_0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
_A = field(
default=0 , metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
} , )
_A = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "train"
_A = "dev"
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = 42
_A = 42
_A = 42
_A = 42
def __init__( self : Optional[Any] , snake_case__ : SquadDataTrainingArguments , snake_case__ : PreTrainedTokenizer , snake_case__ : Optional[int] = None , snake_case__ : Union[str, Split] = Split.train , snake_case__ : Optional[bool] = False , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = "pt" , ):
"""simple docstring"""
A =args
A =is_language_sensitive
A =SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(snake_case__ , snake_case__ ):
try:
A =Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
A =mode
# Load data features from cache or dataset file
A ="v2" if args.version_2_with_negative else "v1"
A =os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A =cached_features_file + ".lock"
with FileLock(snake_case__ ):
if os.path.exists(snake_case__ ) and not args.overwrite_cache:
A =time.time()
A =torch.load(snake_case__ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
A =self.old_features["features"]
A =self.old_features.get("dataset" , snake_case__ )
A =self.old_features.get("examples" , snake_case__ )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
" future run" )
else:
if mode == Split.dev:
A =self.processor.get_dev_examples(args.data_dir )
else:
A =self.processor.get_train_examples(args.data_dir )
A , A =squad_convert_examples_to_features(
examples=self.examples , tokenizer=snake_case__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=snake_case__ , )
A =time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , snake_case__ , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : int ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
A =self.features[i]
A =torch.tensor(feature.input_ids , dtype=torch.long )
A =torch.tensor(feature.attention_mask , dtype=torch.long )
A =torch.tensor(feature.token_type_ids , dtype=torch.long )
A =torch.tensor(feature.cls_index , dtype=torch.long )
A =torch.tensor(feature.p_mask , dtype=torch.float )
A =torch.tensor(feature.is_impossible , dtype=torch.float )
A ={
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
A =torch.tensor(feature.start_position , dtype=torch.long )
A =torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
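# A minimal usage sketch (hypothetical checkpoint and data directory; `SquadDataset` and
# `SquadDataTrainingArguments` are the original names of the classes defined above):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad_data")
#   train_dataset = SquadDataset(args, tokenizer=tokenizer, mode="train")
#   features = train_dataset[0]   # dict with input_ids, attention_mask, positions, ...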
| 689 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCamelCase_ ( a_ ) ->Tuple:
A =FileLock(str(tmpdir / "foo.lock" ) )
A =FileLock(str(tmpdir / "foo.lock" ) )
A =0.01
with locka.acquire():
with pytest.raises(a_ ):
A =time.time()
lockb.acquire(a_ )
assert time.time() - _start > timeout
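# A minimal usage sketch (illustrative path) of the behaviour exercised above: acquiring
# an already-held lock raises `Timeout` once the given timeout elapses.
#
#   lock_a = FileLock("/tmp/example.lock")
#   lock_b = FileLock("/tmp/example.lock")
#   with lock_a.acquire():
#       try:
#           lock_b.acquire(timeout=0.01)
#       except Timeout:
#           pass   # expected: the lock is held by lock_a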
def UpperCamelCase_ ( a_ ) ->List[Any]:
A ="a" * 1000 + ".lock"
locka =FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(a_ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
lockb =FileLock(tmpdir / filename )
with lockb.acquire():
with pytest.raises(a_ ):
locka.acquire(0 )
| 689 | 1 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__a = logging.get_logger(__name__)
def UpperCamelCase_ ( a_ , a_ ) ->Optional[Any]:
A =set()
A =[]
def parse_line(a_ ):
for line in fp:
if isinstance(a_ , a_ ):
A =line.decode("UTF-8" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(" " ):
# process a single warning and move it to `selected_warnings`.
if len(a_ ) > 0:
A ="\n".join(a_ )
# Only keep the warnings specified in `targets`
if any(f''': {x}: ''' in warning for x in targets ):
selected_warnings.add(a_ )
buffer.clear()
continue
else:
A =line.strip()
buffer.append(a_ )
if from_gh:
for filename in os.listdir(a_ ):
A =os.path.join(a_ , a_ )
if not os.path.isdir(a_ ):
# read the file
if filename != "warnings.txt":
continue
with open(a_ ) as fp:
parse_line(a_ )
else:
try:
with zipfile.ZipFile(a_ ) as z:
for filename in z.namelist():
if not os.path.isdir(a_ ):
# read the file
if filename != "warnings.txt":
continue
with z.open(a_ ) as fp:
parse_line(a_ )
except Exception:
logger.warning(
f'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
return selected_warnings
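# A small illustrative check (assumed log line) of the target filter used above:
#
#   warning = "src/foo.py:12: DeprecationWarning: `foo` is deprecated"
#   targets = ["DeprecationWarning", "UserWarning", "FutureWarning"]
#   assert any(f": {x}: " in warning for x in targets)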
def UpperCamelCase_ ( a_ , a_ ) ->List[str]:
A =set()
A =[os.path.join(a_ , p ) for p in os.listdir(a_ ) if (p.endswith(".zip" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(a_ , a_ ) )
return selected_warnings
if __name__ == "__main__":
def UpperCamelCase_ ( a_ ) ->Dict:
return values.split("," )
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
__a = parser.parse_args()
__a = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__a = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__a = extract_warnings(args.output_dir, args.targets)
__a = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 689 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 | 1 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
__a = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
__a = 1_0
__a = 2_5_6
def UpperCamelCase_ ( a_ ) ->Optional[MinHash]:
if len(a_ ) < MIN_NUM_TOKENS:
return None
A =MinHash(num_perm=a_ )
for token in set(a_ ):
min_hash.update(token.encode() )
return min_hash
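# A small illustrative check (toy tokens; `get_min_hash` is the original name of the
# function above, as the callers below refer to it): inputs with fewer than
# MIN_NUM_TOKENS tokens yield no MinHash.
#
#   assert get_min_hash(["tok"] * 5) is None
#   assert get_min_hash([f"tok{i}" for i in range(20)]) is not None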
def UpperCamelCase_ ( a_ ) ->Set[str]:
return {t for t in NON_ALPHA.split(a_ ) if len(t.strip() ) > 0}
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : Dict , * , snake_case__ : float = 0.85 , ):
"""simple docstring"""
A =duplication_jaccard_threshold
A =NUM_PERM
A =MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
A =defaultdict(snake_case__ )
def _a ( self : List[str] , snake_case__ : Tuple , snake_case__ : MinHash ):
"""simple docstring"""
A =self._index.query(snake_case__ )
if code_key in self._index.keys:
print(f'''Duplicate key {code_key}''' )
return
self._index.insert(snake_case__ , snake_case__ )
if len(snake_case__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(snake_case__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(snake_case__ )
def _a ( self : Any ):
"""simple docstring"""
A =[]
for base, duplicates in self._duplicate_clusters.items():
A =[base] + list(snake_case__ )
# reformat the cluster to be a list of dict
A =[{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
duplicate_clusters.append(snake_case__ )
return duplicate_clusters
def _a ( self : Any , snake_case__ : List[Any] ):
"""simple docstring"""
A =self.get_duplicate_clusters()
with open(snake_case__ , "w" ) as f:
json.dump(snake_case__ , snake_case__ )
def UpperCamelCase_ ( a_ ) ->Dict:
A , A =element
A =get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def UpperCamelCase_ ( a_ ) ->Dict:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(a_ , max_queue_size=1_0000 ) , chunksize=100 , ):
if data is not None:
yield data
def UpperCamelCase_ ( a_ , a_ ) ->Dict:
A =DuplicationIndex(duplication_jaccard_threshold=a_ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(a_ ) ) , max_queue_size=100 ) ):
di.add(filename , min_hash )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def UpperCamelCase_ ( codea , codeb ) ->float:
tokensa =get_tokens(codea )
tokensb =get_tokens(codeb )
return len(tokensa & tokensb ) / len(tokensa | tokensb )
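# A small illustrative check (assumed snippets; `jaccard_similarity` is the original name
# of the function above, as used by the callers below):
#
#   score = jaccard_similarity("def add(a, b): return a + b", "def add(x, y): return x + y")
#   assert 0.0 < score < 1.0   # token sets overlap partially (3 shared / 7 total)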
__a = None
def UpperCamelCase_ ( a_ , a_ ) ->Union[str, Any]:
extremes =[]
for elementa in cluster:
codea =_shared_dataset[elementa["base_index"]]["content"]
for elementb in extremes:
codeb =_shared_dataset[elementb["base_index"]]["content"]
if jaccard_similarity(codea , codeb ) >= jaccard_threshold:
elementb["copies"] += 1
break
else:
elementa["copies"] =1
extremes.append(elementa )
return extremes
def UpperCamelCase_ ( a_ , a_ , a_ ) ->Tuple:
global _shared_dataset
A =dataset
A =[]
A =partial(_find_cluster_extremes_shared , jaccard_threshold=a_ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
a_ , a_ , ) , total=len(a_ ) , ):
extremes_list.append(a_ )
return extremes_list
def UpperCamelCase_ ( a_ , a_ = 0.85 ) ->Tuple[Type[Dataset], List[List[Dict]]]:
A =make_duplicate_clusters(a_ , a_ )
A ={x["base_index"] for cluster in duplicate_clusters for x in cluster}
A ={}
A =find_extremes(a_ , a_ , a_ )
for extremes in extremes_clusters:
for element in extremes:
A =element
A =duplicate_indices - set(extreme_dict.keys() )
A =dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=a_ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
A =element["base_index"] in extreme_dict
if element["is_extreme"]:
A =extreme_dict[element["base_index"]]["copies"]
print(f'''Original dataset size: {len(a_ )}''' )
print(f'''Number of duplicate clusters: {len(a_ )}''' )
print(f'''Files in duplicate cluster: {len(a_ )}''' )
print(f'''Unique files in duplicate cluster: {len(a_ )}''' )
print(f'''Filtered dataset size: {len(a_ )}''' )
return ds_filter, duplicate_clusters
| 689 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__a = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__a = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
__a = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
"""simple docstring"""
def _a ( self : Any ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
def _a ( self : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : int = CHRF.CHAR_ORDER , snake_case__ : int = CHRF.WORD_ORDER , snake_case__ : int = CHRF.BETA , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , ):
"""simple docstring"""
A =len(references[0] )
if any(len(refs ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
A =[[refs[i] for refs in references] for i in range(snake_case__ )]
A =CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
A =sb_chrf.corpus_score(snake_case__ , snake_case__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
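# A small illustrative note (toy data) on the reference transposition performed above:
# sacrebleu expects one list per reference *position*, not one list per prediction.
#
#   references = [["ref A1", "ref A2"], ["ref B1", "ref B2"]]   # one sub-list per prediction
#   transposed = [[refs[i] for refs in references] for i in range(2)]
#   # -> [["ref A1", "ref B1"], ["ref A2", "ref B2"]]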
| 689 | 1 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__a = get_logger(__name__)
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : Optional[Any] , snake_case__ : Optional[str] = None ):
"""simple docstring"""
A =(
os.path.join(snake_case__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
A =Extractor
def _a ( self : Tuple , snake_case__ : str ):
"""simple docstring"""
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
A =os.path.abspath(snake_case__ )
return os.path.join(self.extract_dir , hash_url_to_filename(snake_case__ ) )
def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : bool ):
"""simple docstring"""
return force_extract or (
not os.path.isfile(snake_case__ ) and not (os.path.isdir(snake_case__ ) and os.listdir(snake_case__ ))
)
def _a ( self : int , snake_case__ : str , snake_case__ : bool = False ):
"""simple docstring"""
A =self.extractor.infer_extractor_format(snake_case__ )
if not extractor_format:
return input_path
A =self._get_output_path(snake_case__ )
if self._do_extract(snake_case__ , snake_case__ ):
self.extractor.extract(snake_case__ , snake_case__ , snake_case__ )
return output_path
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
@classmethod
@abstractmethod
def _a ( cls : Optional[Any] , snake_case__ : Union[Path, str] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
...
@staticmethod
@abstractmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
...
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
_A = []
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : int ):
"""simple docstring"""
with open(snake_case__ , "rb" ) as f:
return f.read(snake_case__ )
@classmethod
def _a ( cls : Union[str, Any] , snake_case__ : Union[Path, str] , snake_case__ : bytes = b"" ):
"""simple docstring"""
if not magic_number:
A =max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
try:
A =cls.read_magic_number(snake_case__ , snake_case__ )
except OSError:
return False
return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
@classmethod
def _a ( cls : List[Any] , snake_case__ : Union[Path, str] , **snake_case__ : int ):
"""simple docstring"""
return tarfile.is_tarfile(snake_case__ )
@staticmethod
def _a ( snake_case__ : List[Any] , snake_case__ : int ):
"""simple docstring"""
def resolved(snake_case__ : str ) -> str:
return os.path.realpath(os.path.abspath(snake_case__ ) )
def badpath(snake_case__ : str , snake_case__ : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(snake_case__ , snake_case__ ) ).startswith(snake_case__ )
def badlink(snake_case__ : Tuple , snake_case__ : str ) -> bool:
# Links are interpreted relative to the directory containing the link
A =resolved(os.path.join(snake_case__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=snake_case__ )
A =resolved(snake_case__ )
for finfo in members:
if badpath(finfo.name , snake_case__ ):
logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(snake_case__ , snake_case__ ):
logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(snake_case__ , snake_case__ ):
logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
os.makedirs(snake_case__ , exist_ok=snake_case__ )
A =tarfile.open(snake_case__ )
tar_file.extractall(snake_case__ , members=TarExtractor.safemembers(snake_case__ , snake_case__ ) )
tar_file.close()
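# A small illustrative check (assumed paths, no symlinks) of the path-traversal guard
# implemented by `resolved`/`badpath` above: entries resolving outside the extraction
# directory are blocked.
#
#   import os
#   base = os.path.realpath("/tmp/extract")
#   inside = os.path.realpath(os.path.join(base, "data/file.txt"))
#   outside = os.path.realpath(os.path.join(base, "../evil.txt"))
#   assert inside.startswith(base) and not outside.startswith(base)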
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [B"\x1F\x8B"]
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
with gzip.open(snake_case__ , "rb" ) as gzip_file:
with open(snake_case__ , "wb" ) as extracted_file:
shutil.copyfileobj(snake_case__ , snake_case__ )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [
B"PK\x03\x04",
B"PK\x05\x06", # empty archive
B"PK\x07\x08", # spanned archive
]
@classmethod
def _a ( cls : Dict , snake_case__ : Union[Path, str] , snake_case__ : bytes = b"" ):
"""simple docstring"""
if super().is_extractable(snake_case__ , magic_number=snake_case__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(snake_case__ , "rb" ) as fp:
A =_EndRecData(snake_case__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
A =fp.read(snake_case__ ) # CD is where we expect it to be
if len(snake_case__ ) == sizeCentralDir:
A =struct.unpack(snake_case__ , snake_case__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with zipfile.ZipFile(snake_case__ , "r" ) as zip_file:
zip_file.extractall(snake_case__ )
zip_file.close()
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [B"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
with lzma.open(snake_case__ ) as compressed_file:
with open(snake_case__ , "wb" ) as extracted_file:
shutil.copyfileobj(snake_case__ , snake_case__ )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [B"Rar!\x1a\x07\x00", B"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(snake_case__ , exist_ok=snake_case__ )
A =rarfile.RarFile(snake_case__ )
rf.extractall(snake_case__ )
rf.close()
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [B"\x28\xb5\x2F\xFD"]
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
A =zstd.ZstdDecompressor()
with open(snake_case__ , "rb" ) as ifh, open(snake_case__ , "wb" ) as ofh:
dctx.copy_stream(snake_case__ , snake_case__ )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [B"\x42\x5A\x68"]
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
with bz2.open(snake_case__ , "rb" ) as compressed_file:
with open(snake_case__ , "wb" ) as extracted_file:
shutil.copyfileobj(snake_case__ , snake_case__ )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [B"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
import py7zr
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with py7zr.SevenZipFile(snake_case__ , "r" ) as archive:
archive.extractall(snake_case__ )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [B"\x04\x22\x4D\x18"]
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
import lz4.frame
with lz4.frame.open(snake_case__ , "rb" ) as compressed_file:
with open(snake_case__ , "wb" ) as extracted_file:
shutil.copyfileobj(snake_case__ , snake_case__ )
class UpperCamelCase__:
"""simple docstring"""
_A = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _a ( cls : Union[str, Any] ):
"""simple docstring"""
return max(
len(extractor_magic_number )
for extractor in cls.extractors.values()
if issubclass(extractor , MagicNumberBaseExtractor )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : int ):
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(snake_case__ , magic_number_length=snake_case__ )
except OSError:
return b""
@classmethod
def _a ( cls : Optional[int] , snake_case__ : Union[Path, str] , snake_case__ : bool = False ):
"""simple docstring"""
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=snake_case__ , )
A =cls.infer_extractor_format(snake_case__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _a ( cls : int , snake_case__ : Union[Path, str] ): # <Added version="2.4.0"/>
"""simple docstring"""
A =cls._get_magic_number_max_length()
A =cls._read_magic_number(snake_case__ , snake_case__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(snake_case__ , magic_number=snake_case__ ):
return extractor_format
@classmethod
def _a ( cls : Union[str, Any] , snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] , snake_case__ : Optional[str] = None , snake_case__ : Optional[BaseExtractor] = "deprecated" , ):
"""simple docstring"""
os.makedirs(os.path.dirname(snake_case__ ) , exist_ok=snake_case__ )
# Prevent parallel extractions
A =str(Path(snake_case__ ).with_suffix(".lock" ) )
with FileLock(snake_case__ ):
shutil.rmtree(snake_case__ , ignore_errors=snake_case__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(snake_case__ , snake_case__ ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=snake_case__ , )
A =extractor if extractor != "deprecated" else extractor_format
else:
A =cls.extractors[extractor_format]
return extractor.extract(snake_case__ , snake_case__ )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=snake_case__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(snake_case__ ):
return extractor.extract(snake_case__ , snake_case__ )
| 689 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 | 1 |
__a = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__a = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__a = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 689 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : List[str] , snake_case__ : Optional[int] , ):
"""simple docstring"""
A =parent
A =13
A =7
A =True
A =True
A =True
A =True
A =True
A =False
A =False
A =False
A =2
A =99
A =0
A =32
A =2
A =4
A =0.1
A =0.1
A =5_12
A =16
A =2
A =0.02
A =3
A =4
A ="last"
A =True
A =None
A =0
def _a ( self : Optional[Any] ):
"""simple docstring"""
A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.float32 )
A =None
if self.use_input_lengths:
A =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A =None
if self.use_token_type_ids:
A =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A =None
A =None
A =None
if self.use_labels:
A =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A =ids_tensor([self.batch_size] , 2 , dtype=tf.float32 )
A =ids_tensor([self.batch_size] , self.num_choices )
A =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self : Any , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
A =TFFlaubertModel(config=snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
A =model(snake_case__ )
A =[input_ids, input_mask]
A =model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Any , ):
"""simple docstring"""
A =TFFlaubertWithLMHeadModel(snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Any , snake_case__ : List[str] , ):
"""simple docstring"""
A =TFFlaubertForQuestionAnsweringSimple(snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths}
A =model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Any , ):
"""simple docstring"""
A =TFFlaubertForSequenceClassification(snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , ):
"""simple docstring"""
A =self.num_labels
A =TFFlaubertForTokenClassification(config=snake_case__ )
A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any , ):
"""simple docstring"""
A =self.num_choices
A =TFFlaubertForMultipleChoice(config=snake_case__ )
A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
A ={
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self : Any ):
"""simple docstring"""
A =self.prepare_config_and_inputs()
( A , A , A , A , A , A , A , A , A ) =config_and_inputs
A ={
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"langs": token_type_ids,
"lengths": input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
_A = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_A = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A = False
_A = False
def _a ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self : Optional[int] ):
"""simple docstring"""
A =TFFlaubertModelTester(self )
A =ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def _a ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : str ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*snake_case__ )
def _a ( self : Tuple ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*snake_case__ )
@slow
def _a ( self : Tuple ):
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A =TFFlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Tuple ):
"""simple docstring"""
A =TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
A =tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
A =model(snake_case__ )[0]
A =tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice.
A =tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 689 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = botoa.client("iam" )
    sagemaker_trust_policy = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
],
}
try:
# create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name , AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy , indent=2 ) )
        policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name , PolicyName=f'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(policy_document , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'''role {role_name} already exists. Using existing one''' )
def _get_iam_role_arn(role_name):
    iam_client = botoa.client("iam" )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def get_sagemaker_input():
A =_ask_options(
"How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , a_ , )
A =None
if credentials_configuration == 0:
A =_ask_field("Enter your AWS Profile name: [default] " , default="default" )
A =aws_profile
else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
A =_ask_field("AWS Access Key ID: " )
A =aws_access_key_id
A =_ask_field("AWS Secret Access Key: " )
A =aws_secret_access_key
A =_ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
A =aws_region
    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , int , )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: " )
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
        _create_iam_role_for_sagemaker(iam_role_name )
A =_ask_field(
"Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
A =None
if is_custom_docker_image:
A =_ask_field("Enter your Docker image: " , lambda a_ : str(a_ ).lower() )
A =_ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
A =None
if is_sagemaker_inputs_enabled:
A =_ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda a_ : str(a_ ).lower() , )
A =_ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
A =None
if is_sagemaker_metrics_enabled:
A =_ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda a_ : str(a_ ).lower() , )
A =_ask_options(
"What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
A ={}
A =_ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
if use_dynamo:
A ="dynamo_"
A =_ask_options(
"Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
A =_ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
if use_custom_options:
A =_ask_options(
"Which mode do you want to use?" , a_ , lambda a_ : TORCH_DYNAMO_MODES[int(a_ )] , default="default" , )
A =_ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
A =_ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
A ="Which EC2 instance type you want to use for your training?"
if distributed_type != SageMakerDistributedType.NO:
A =_ask_options(
a_ , a_ , lambda a_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(a_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
A =_ask_field(a_ , lambda a_ : str(a_ ).lower() , default="ml.p3.2xlarge" )
A =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
A =_ask_field(
"How many machines do you want use? [1]: " , a_ , default=1 , )
A =_ask_options(
"Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
return SageMakerConfig(
image_uri=a_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=a_ , use_cpu=a_ , dynamo_config=a_ , eca_instance_type=a_ , profile=a_ , region=a_ , iam_role_name=a_ , mixed_precision=a_ , num_machines=a_ , sagemaker_inputs_file=a_ , sagemaker_metrics_file=a_ , )
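# Illustrative result (added for context; the field names and values below are
# assumptions, not produced by this file): a completed questionnaire serializes
# roughly to a config like
# {
#     "compute_environment": "AMAZON_SAGEMAKER",
#     "distributed_type": "DATA_PARALLEL",
#     "ec2_instance_type": "ml.p3.2xlarge",
#     "iam_role_name": "accelerate_sagemaker_execution_role",
#     "mixed_precision": "fp16",
#     "num_machines": 2,
# }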
| 689 |
from __future__ import annotations
def generate_all_permutations(sequence: list) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])
def create_state_space_tree(sequence: list, current_sequence: list, index: int, index_used: list) -> None:
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
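# Cross-check (added for illustration; not part of the original script): the
# state-space tree prints every one of the n! orderings, matching itertools.
from itertools import permutations

assert len(list(permutations(sequence))) == 24  # 4! orderings for the 4-element input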
| 689 | 1 |
def count_divisors(n: int) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution() -> int:
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
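# Worked example (added for illustration): the divisor count comes from the prime
# factorization: 28 = 2**2 * 7 has (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28).
assert count_divisors(28) == 6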
| 689 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =AutoencoderKL(
sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
A =UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def _a ( self : int ):
"""simple docstring"""
A ="cpu" # ensure determinism for the device-dependent torch.Generator
A =Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
A =DDPMScheduler()
A =AudioDiffusionPipeline(vqvae=snake_case__ , unet=self.dummy_unet , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ , steps=4 )
A =output.audios[0]
A =output.images[0]
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ , steps=4 , return_dict=snake_case__ )
A =output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
A =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
A =Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
A =DDIMScheduler()
A =self.dummy_vqvae_and_unet
A =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
A =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(raw_audio=snake_case__ , generator=snake_case__ , start_step=5 , steps=10 )
A =output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
A =self.dummy_unet_condition
A =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=snake_case__ , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
A =torch.rand((1, 1, 10) )
A =pipe(generator=snake_case__ , encoding=snake_case__ )
A =output.images[0]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =torch_device
A =DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ )
A =output.audios[0]
A =output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 689 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""MobileViTFeatureExtractor"""]
__a = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 |
import os
import sys
import unittest
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__a = os.path.join(git_repo_path, """src""", """diffusers""")
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
A =find_backend(" if not is_torch_available():" )
self.assertEqual(snake_case__ , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
A =find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
        self.assertEqual(triple_backend , "torch_and_transformers_and_onnx" )
def _a ( self : List[Any] ):
"""simple docstring"""
        objects = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , snake_case__ )
self.assertIn("torch_and_transformers" , snake_case__ )
self.assertIn("flax_and_transformers" , snake_case__ )
self.assertIn("torch_and_transformers_and_onnx" , snake_case__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def _a ( self : Dict ):
"""simple docstring"""
A =create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(snake_case__ , "\nCONSTANT = None\n" )
A =create_dummy_object("function" , "'torch'" )
self.assertEqual(
snake_case__ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
A ="\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
A =create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(snake_case__ , snake_case__ )
def _a ( self : Tuple ):
"""simple docstring"""
A ="# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
A =create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , snake_case__ )
| 689 | 1 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    """simple docstring"""
    def __init__( self : int ):
        """simple docstring"""
        self.elements = []
        self.set = set()
    def minkey(self):
        """simple docstring"""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf" )
    def empty(self):
        """simple docstring"""
        return len(self.elements ) == 0
    def put(self, item, priority):
        """simple docstring"""
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri, x) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )
    def remove_element(self, item):
        """simple docstring"""
        if item in self.set:
            self.set.remove(item )
            temp = []
            (pro, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                (pro, x) = heapq.heappop(self.elements )
            for prito, yyy in temp:
                heapq.heappush(self.elements , (prito, yyy) )
    def top_show(self):
        """simple docstring"""
        return self.elements[0][1]
    def get(self):
        """simple docstring"""
        (priority, item) = heapq.heappop(self.elements )
        self.set.remove(item )
        return (priority, item)
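# Usage sketch (added for illustration; not part of the original file): unlike a plain
# heapq list, put() re-prioritises an item that is already queued instead of duplicating it.
# pq = PriorityQueue()
# pq.put((0, 0), 5)
# pq.put((0, 0), 3)  # updates the existing entry's priority
# assert pq.get() == (3, (0, 0))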
def consistent_heuristic(P: TPos , goal: TPos ):
    # euclidean distance
    a = np.array(P )
    b = np.array(goal )
    return np.linalg.norm(a - b )
def heuristic_1(P: TPos , goal: TPos ):
    # integer division by time variable
    return consistent_heuristic(P , goal ) // t
def heuristic_2(P: TPos , goal: TPos ):
    # manhattan distance
    return abs(P[0] - goal[0] ) + abs(P[1] - goal[1] )
def key(start: TPos , i: int , goal: TPos , g_function: dict ):
    ans = g_function[start] + W1 * heuristics[i](start , goal )
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n) )
    for i in range(n ):
        for j in range(n ):
            grid[i][j] = "*"
    for i in range(n ):
        for j in range(n ):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"
    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"
    for i in range(n ):
        for j in range(n ):
            if (i, j) == (0, n - 1):
                print(grid[i][j] , end=" " )
                print("<-- End position" , end=" " )
            else:
                print(grid[i][j] , end=" " )
        print()
    print("^" )
    print("Start position" )
    print()
    print("# is an obstacle" )
    print("- is the path taken by algorithm" )
    print("PATH TAKEN BY THE ALGORITHM IS:-" )
    x = back_pointer[goal]
    while x != start:
        print(x , end=" " )
        x = back_pointer[x]
    print(x )
    sys.exit()
def valid(p: TPos ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state(s , j , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , ):
    for itera in range(n_heuristic ):
        open_list[itera].remove_element(s )
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours )
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf" )
                if valid(neighbours ) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours , key(neighbours , 0 , goal , g_function ) )
                    if neighbours not in close_list_inad:
                        for var in range(1 , n_heuristic ):
                            if key(neighbours , var , goal , g_function ) <= W1 * key(
                                neighbours , 0 , goal , g_function ):
                                open_list[j].put(
                                    neighbours , key(neighbours , var , goal , g_function ) )
def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star(start: TPos , goal: TPos , n_heuristic: int ):
    g_function = {start: 0, goal: float("inf" )}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()
    for i in range(n_heuristic ):
        open_list.append(PriorityQueue() )
        open_list[i].put(start , key(start , i , goal , g_function ) )
    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf" ):
        for i in range(1 , n_heuristic ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf" ):
                        do_something(back_pointer , goal , start )
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , i , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_inad.append(get_s )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf" ):
                        do_something(back_pointer , goal , start )
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s )
                        expand_state(
                            get_s , 0 , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                        close_list_anchor.append(get_s )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(a_ ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 689 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    """simple docstring"""
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
__a = namedtuple("""CoinsDistribResult""", """moves excess""")
def distribute_coins(root: TreeNode | None) -> int:
if root is None:
return 0
# Validation
def count_nodes(a_ ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(a_ ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(a_ ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
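# Usage sketch (added for illustration; not part of the original module): a root holding
# all three coins must push one coin down each edge, i.e. two moves in total.
example_root = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(example_root) == 2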
| 689 | 1 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
    """simple docstring"""
    pass
def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ["RANK"] )
    world_size = int(os.environ["WORLD_SIZE"] )
    parser = ArgumentParser()
    parser.add_argument("--streaming" , type=bool )
    parser.add_argument("--local_rank" , type=int )
    parser.add_argument("--num_workers" , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [f'''shard_{shard_idx}''' for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''' )
if __name__ == "__main__":
main()
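# Launch sketch (added for illustration; the exact command is an assumption, not part of
# this file): the test expects RANK and WORLD_SIZE in the environment, i.e. one process
# per rank started by a distributed launcher, e.g.
#   torchrun --nproc_per_node=2 this_test.py --num_workers 2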
| 689 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """openbmb/cpm-ant-10b""": 1_0_2_4,
}
def load_vocab(vocab_file):
    vocab = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("\n" )
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    """simple docstring"""
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=2_00 ):
        """simple docstring"""
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, token):
        """simple docstring"""
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="<d>" , snake_case__ : Optional[int]="</d>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="</n>" , snake_case__ : Any="</_>" , snake_case__ : List[str]="left" , **snake_case__ : Optional[int] , ):
"""simple docstring"""
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , )
A =bod_token
A =eod_token
A =load_vocab(snake_case__ )
A =self.encoder[space_token]
A =self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
A ={v: k for k, v in self.encoder.items()}
A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
    @property
    def bod_token_id(self):
        """simple docstring"""
        return self.encoder[self.bod_token]
    @property
    def eod_token_id(self):
        """simple docstring"""
        return self.encoder[self.eod_token]
    @property
    def newline_id(self):
        """simple docstring"""
        return self.encoder["\n"]
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.encoder )
    def get_vocab(self):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize(self, text):
        """simple docstring"""
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens
    def _decode(self, token_ids, **kwargs):
        """simple docstring"""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )
    def check(self, token):
        """simple docstring"""
        return token in self.encoder
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        return "".join(tokens )
    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.decoder.get(index , self.unk_token )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None ):
        """simple docstring"""
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
                    index = token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
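# Worked trace (added for illustration; toy vocabulary, not the real CPM-Ant vocab):
# the greedy longest-match in WordpieceTokenizer.tokenize takes the longest vocabulary
# prefix first, so with only "un" and "happy" in the vocab, "unhappy" -> ["un", "happy"]:
# toy_vocab = {"un": 0, "happy": 1}
# assert WordpieceTokenizer(vocab=toy_vocab, unk_token="<unk>").tokenize("unhappy") == ["un", "happy"]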
| 689 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__a = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 |
def solution(n: int = 6008_5147_5143) -> int:
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(F'''{solution() = }''')
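# Quick check (added for illustration): 13195 = 5 * 7 * 13 * 29, so the trial division
# above returns 29 as the largest prime factor.
assert solution(13195) == 29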
| 689 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__a = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    """simple docstring"""
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, feature_extractor, tokenizer):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """simple docstring"""
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
        except OSError:
            warnings.warn(
                f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: " , FutureWarning , )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
    def __call__(self, *args, **kwargs):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            audio = kwargs.pop("raw_speech" )
        else:
            audio = kwargs.pop("audio" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def pad(self, *args, **kwargs):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop("input_features" , None )
        labels = kwargs.pop("labels" , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor(self):
        """simple docstring"""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
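# Usage sketch (added for illustration; the checkpoint id is an example, not taken from
# this file): the processor pairs the audio feature extractor with a CTC tokenizer.
# processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
# inputs = processor(audio=raw_speech, sampling_rate=16_000, return_tensors="pt")
# labels = processor(text="HELLO WORLD", return_tensors="pt").input_ids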
| 689 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 689 | 1 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock" ) )
    lock2 = FileLock(str(tmpdir / "foo.lock" ) )
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock2.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename ) )
    assert lock1._lock_file.endswith(".lock" )
    assert not lock1._lock_file.endswith(filename )
    assert len(os.path.basename(lock1._lock_file ) ) <= 255
    lock2 = FileLock(tmpdir / filename )
    with lock1.acquire():
        with pytest.raises(Timeout ):
            lock2.acquire(0 )
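# Minimal usage sketch (added for illustration; the lock path is hypothetical):
# with FileLock("shared_resource.lock"):
#     ...  # only one process at a time executes this critical section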
| 689 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__a = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
__a = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
__a = """|""".join(sys.argv[1:])
__a = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__a = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 689 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""MobileViTFeatureExtractor"""]
__a = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 | 1 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid , init , goal , cost , heuristic , ) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError("Algorithm is unable to find solution" )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid ) and y2 >= 0 and y2 < len(grid[0] ):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2] )
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 689 |
def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0 ) != 0 )
def test_nand_gate() -> None:
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
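# Equivalent formulation (added for illustration): NAND is the negation of AND, which
# the exhaustive truth-table check below confirms.
assert all(nand_gate(a, b) == int(not (a and b)) for a in (0, 1) for b in (0, 1))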
| 689 | 1 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            """Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned"""
            """ Distillation"""
        )
    )
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()
    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
    state_dict = model.state_dict()
    compressed_sd = {}
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[F'''distilbert.embeddings.{w}.weight'''] = state_dict[F'''{prefix}.embeddings.{w}.weight''']
    for w in ["weight", "bias"]:
        compressed_sd[F'''distilbert.embeddings.LayerNorm.{w}'''] = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
        for w in ["weight", "bias"]:
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
            ]
        std_idx += 1
    compressed_sd["vocab_projector.weight"] = state_dict["""cls.predictions.decoder.weight"""]
    compressed_sd["vocab_projector.bias"] = state_dict["""cls.predictions.bias"""]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[F'''vocab_transform.{w}'''] = state_dict[F'''cls.predictions.transform.dense.{w}''']
            compressed_sd[F'''vocab_layer_norm.{w}'''] = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
    print(F'''N layers selected for distillation: {std_idx}''')
    print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
    print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 689 |
def combination_sum_iv(n: int, array: list, target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array(n: int, array: list, target: int) -> int:
    def count_of_possible_combinations_with_dp_array(
        target: int , dp_array: list ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up(n: int, array: list, target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
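# Cross-check (added for illustration): the dp table counts ordered compositions, so for
# target 5 with coins [1, 2, 5] there are 9 ways: five 1s; 1+1+1+2 in 4 orders;
# 1+2+2 in 3 orders; and 5 itself.
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9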
| 689 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
        """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
        """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
        """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
        """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
        """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
        """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
        """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
    },
    """tokenizer_file""": {
        """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
        """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
        """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
        """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
        """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
        """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
        """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
        """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    """Fast ALBERT tokenizer backed by HuggingFace's `tokenizers` library (SentencePiece/Unigram-based)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by adding special tokens: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type IDs: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
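# Usage sketch (illustrative; requires network access to the public checkpoint):
#
#   tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   ids = tokenizer("ALBERT shares parameters across layers.")["input_ids"]
#   tokenizer.save_pretrained("./albert-tok")  # writes spiece.model and tokenizer.json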
| 689 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the falling-factorial term of Newton's formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # build the forward difference table column by column
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
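# Newton's forward-difference formula computed by main():
#   f(x) ≈ y0 + u*Δy0 + u(u-1)/2! * Δ²y0 + ...,  with u = (x - x0) / h.
# Worked check: x = [0, 1, 2, 3], y = [1, 2, 4, 8], interpolating at 1.5 gives
#   1 + 1.5 + 0.375 - 0.0625 = 2.8125.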
| 689 | 1 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 689 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    row, col = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(row):
        for j in range(col):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)
# convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
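# Note: since `img` is a uint8 NumPy array, the same transform can be done
# without explicit loops as `img = 255 - img`.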
| 689 | 1 |
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the number of distinct binary search trees on `node_count` nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of distinct labeled binary trees on `node_count` nodes."""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
F'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
F'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
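# Worked example: for 5 nodes, catalan_number(5) = C(10, 5) // 6 = 252 // 6 = 42
# binary search trees, and binary_tree_count(5) = 42 * 5! = 5040 labeled binary trees.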
| 689 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}
CONTROL_CODES = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
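# Example: get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.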
class CTRLTokenizer(PreTrainedTokenizer):
    """CTRL BPE tokenizer. Prompts conventionally begin with one of the control codes above."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # repeatedly merge the lowest-ranked (most frequent) pair
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]  # strip the trailing "</w>"
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
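# Usage sketch (illustrative; the "ctrl" checkpoint name is assumed): prompts
# usually begin with one of the CONTROL_CODES, e.g. the "Links" domain.
#
#   tokenizer = CTRLTokenizer.from_pretrained("ctrl")
#   ids = tokenizer("Links Hello, world!")["input_ids"]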
| 689 | 1 |
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of `plain_text`."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
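# Reference check: adler32("Wikipedia") == 0x11E60398 == 300286872.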
| 689 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedy change-making; assumes `denominations` is sorted ascending."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
        n = int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
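# Caveat: the greedy strategy is optimal for canonical coin systems like the INR
# denominations above, but not in general: with denominations [1, 3, 4] and
# value 6 it returns [4, 1, 1] even though [3, 3] uses fewer coins.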
| 689 | 1 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.auto.tqdm` that, by default, only displays on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # disable the bar on every process except the local main process
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
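# Usage sketch (illustrative; the import path is assumed):
#
#   from accelerate.utils import tqdm
#   for batch in tqdm(dataloader):  # bar shown only on the local main process
#       ...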
| 689 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    """Map an audiocraft parameter name onto the transformers MusicGen layout."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename keys, split fused qkv projections, and separate out the enc-dec projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
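# Example invocation (illustrative; the script filename is an assumption):
#
#   python convert_musicgen_transformers.py --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small --device cpu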
| 689 | 1 |
def harmonic_series(n_term: str) -> list:
    """Return the first `n_term` terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 689 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock_timeout(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_lock_filename(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255  # lock filename is shortened to fit OS limits
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 689 | 1 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    """PhoBERT tokenizer, based on Byte-Pair Encoding with a fairseq-style vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """RoBERTa-style format: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """PhoBERT does not use token type ids: the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]  # strip the trailing "</w>"
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a fairseq dictionary ('<token> <count>' per line) into `self.encoder`."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
| 689 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 689 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 689 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    """chrF / chrF++ machine-translation metric, a thin wrapper around sacrebleu."""

    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 689 | 1 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of `nums` (0 for an empty list)."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
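    # Worked example: maximum_non_adjacent_sum([3, 2, 7, 10]) == 13 (pick 3 and 10).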
| 689 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 689 | 1 |
from math import ceil
def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n x n number spiral (Project Euler 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 689 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 689 | 1 |
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next factor of n, record it, then divide it out fully
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(F'''{solution() = }''')
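# Worked example: solution(13195) == 29, since 13195 = 5 * 7 * 13 * 29.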
| 689 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Depth-first walk of the permutation state-space tree; prints each complete permutation."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
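# The state-space tree has n! leaves, so this prints all n! permutations in O(n * n!) time.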
| 689 | 1 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()
    set_architecture_configs(model_name, config)
    if "finetuned" not in model_name:
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def UpperCamelCase_ ( a_ , a_ ) ->Optional[int]:
for key in orig_state_dict.copy().keys():
A =orig_state_dict.pop(a_ )
if key.startswith("encoder." ):
A =key.replace("encoder." , "" )
if "qkv" in key:
A =key.split("." )
if key.startswith("decoder.blocks" ):
A =config.decoder_hidden_size
A =int(key_split[2] )
A ="decoder.decoder_layers."
if "weight" in key:
A =val[:dim, :]
A =val[dim : dim * 2, :]
A =val[-dim:, :]
else:
A =config.hidden_size
A =int(key_split[1] )
A ="videomae.encoder.layer."
if "weight" in key:
A =val[:dim, :]
A =val[dim : dim * 2, :]
A =val[-dim:, :]
else:
A =val
return orig_state_dict
def UpperCamelCase_ ( ) ->int:
A =hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
A =np.load(a_ )
return list(a_ )
def UpperCamelCase_ ( a_ , a_ , a_ , a_ ) ->Union[str, Any]:
A =get_videomae_config(a_ )
if "finetuned" in model_name:
A =VideoMAEForVideoClassification(a_ )
else:
A =VideoMAEForPreTraining(a_ )
# download original checkpoint, hosted on Google Drive
A ="pytorch_model.bin"
gdown.cached_download(a_ , a_ , quiet=a_ )
A =torch.load(a_ , map_location="cpu" )
if "model" in files:
A =files["model"]
else:
A =files["module"]
A =convert_state_dict(a_ , a_ )
model.load_state_dict(a_ )
model.eval()
# verify model on basic input
A =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
A =prepare_video()
A =image_processor(a_ , return_tensors="pt" )
if "finetuned" not in model_name:
A =hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
A =torch.load(a_ )
A =model(**a_ )
A =outputs.logits
A =[
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
A =torch.Size([1, 400] )
A =torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
A =torch.Size([1, 174] )
A =torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
A =torch.Size([1, 1408, 1536] )
A =torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
A =torch.Size([1, 1408, 1536] )
A =torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
A =torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
A =torch.Size([1, 1408, 1536] )
A =torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
A =torch.Size([1, 400] )
A =torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
A =torch.Size([1, 400] )
A =torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
A =torch.Size([1, 400] )
A =torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
A =torch.Size([1, 400] )
A =torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
A =torch.Size([1, 1408, 1536] )
A =torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
A =torch.Size([1, 174] )
A =torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
A =torch.Size([1, 1408, 1536] )
A =torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
A =torch.Size([1, 174] )
A =torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , a_ , atol=1E-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , a_ , atol=1E-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
A =outputs.loss
assert torch.allclose(a_ , a_ , atol=1E-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(a_ )
model.save_pretrained(a_ )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(a_ , organization="nielsr" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__a = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
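# Illustrative invocation (the script filename is a placeholder; flags mirror the
# argparse setup above):
# python convert_videomae_to_pytorch.py \
#     --checkpoint_url "<google-drive-direct-download-link>" \
#     --pytorch_dump_folder_path ./videomae-base \
#     --model_name videomae-base \
#     --push_to_hub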
| 689 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =AutoencoderKL(
sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
A =UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def _a ( self : int ):
"""simple docstring"""
A ="cpu" # ensure determinism for the device-dependent torch.Generator
A =Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
A =DDPMScheduler()
A =AudioDiffusionPipeline(vqvae=snake_case__ , unet=self.dummy_unet , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ , steps=4 )
A =output.audios[0]
A =output.images[0]
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ , steps=4 , return_dict=snake_case__ )
A =output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
A =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
A =Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
A =DDIMScheduler()
A =self.dummy_vqvae_and_unet
A =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
A =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(raw_audio=snake_case__ , generator=snake_case__ , start_step=5 , steps=10 )
A =output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
A =self.dummy_unet_condition
A =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=snake_case__ , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
A =torch.rand((1, 1, 10) )
A =pipe(generator=snake_case__ , encoding=snake_case__ )
A =output.images[0]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =torch_device
A =DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ )
A =output.audios[0]
A =output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
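# Minimal usage sketch distilled from the slow test above (assumes the
# "teticio/audio-diffusion-ddim-256" checkpoint is reachable):
# pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256").to(torch_device)
# output = pipe(generator=torch.Generator(device=torch_device).manual_seed(42))
# audio, image = output.audios[0], output.images[0]  # waveform plus its mel-spectrogram image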
| 689 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "speech_to_text"
_A = ["past_key_values"]
_A = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Union[str, Any] , snake_case__ : Tuple=1_00_00 , snake_case__ : str=12 , snake_case__ : str=20_48 , snake_case__ : str=4 , snake_case__ : str=6 , snake_case__ : Tuple=20_48 , snake_case__ : Optional[Any]=4 , snake_case__ : Any=0.0 , snake_case__ : List[Any]=0.0 , snake_case__ : int=True , snake_case__ : Tuple=True , snake_case__ : List[str]="relu" , snake_case__ : str=2_56 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=0.0 , snake_case__ : int=0.0 , snake_case__ : int=0.02 , snake_case__ : Optional[Any]=2 , snake_case__ : Tuple=True , snake_case__ : Optional[Any]=1 , snake_case__ : Union[str, Any]=0 , snake_case__ : str=2 , snake_case__ : Dict=60_00 , snake_case__ : int=10_24 , snake_case__ : Tuple=2 , snake_case__ : Optional[Any]=(5, 5) , snake_case__ : Dict=10_24 , snake_case__ : Dict=80 , snake_case__ : List[str]=1 , **snake_case__ : Optional[int] , ):
"""simple docstring"""
A =vocab_size
A =d_model
A =encoder_ffn_dim
A =encoder_layers
A =encoder_attention_heads
A =decoder_ffn_dim
A =decoder_layers
A =decoder_attention_heads
A =dropout
A =attention_dropout
A =activation_dropout
A =activation_function
A =init_std
A =encoder_layerdrop
A =decoder_layerdrop
A =use_cache
A =encoder_layers
A =scale_embedding # scale factor will be sqrt(d_model) if True
A =max_source_positions
A =max_target_positions
A =num_conv_layers
A =list(snake_case__ )
A =conv_channels
A =input_feat_per_channel
A =input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , **snake_case__ , )
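# Minimal instantiation sketch. The class above is obfuscated; upstream it is
# `Speech2TextConfig`, inferred from model_type "speech_to_text":
# config = Speech2TextConfig(vocab_size=10_000, encoder_layers=12, conv_kernel_sizes=(5, 5))
# Note that len(conv_kernel_sizes) must equal num_conv_layers (2 by default),
# otherwise the constructor raises the ValueError defined above.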
| 689 |
import os
import sys
import unittest
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__a = os.path.join(git_repo_path, """src""", """diffusers""")
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
A =find_backend(" if not is_torch_available():" )
self.assertEqual(snake_case__ , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
A =find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
A =find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers_and_onnx" )
def _a ( self : List[Any] ):
"""simple docstring"""
A =read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn("torch" , snake_case__ )
self.assertIn("torch_and_transformers" , snake_case__ )
self.assertIn("flax_and_transformers" , snake_case__ )
self.assertIn("torch_and_transformers_and_onnx" , snake_case__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def _a ( self : Dict ):
"""simple docstring"""
A =create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(snake_case__ , "\nCONSTANT = None\n" )
A =create_dummy_object("function" , "'torch'" )
self.assertEqual(
snake_case__ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
A ="\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
A =create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(snake_case__ , snake_case__ )
def _a ( self : Tuple ):
"""simple docstring"""
A ="# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
A =create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , snake_case__ )
| 689 | 1 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
__a = False
__a = False
def UpperCamelCase_ ( a_ ) ->Optional[Any]:
return TrainCommand(a_ )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
@staticmethod
def _a ( snake_case__ : ArgumentParser ):
"""simple docstring"""
A =parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=snake_case__ , required=snake_case__ , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=snake_case__ , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=snake_case__ , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=snake_case__ , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=snake_case__ , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=snake_case__ , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=snake_case__ , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=snake_case__ , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=snake_case__ , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=snake_case__ , default=32 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=snake_case__ , default=64 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=snake_case__ , default=3E-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=snake_case__ , default=1E-08 , help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=snake_case__ )
def __init__( self : List[str] , snake_case__ : Namespace ):
"""simple docstring"""
A =logging.get_logger("transformers-cli/training" )
A ="tf" if is_tf_available() else "torch"
os.makedirs(args.output , exist_ok=snake_case__ )
A =args.output
A =args.column_label
A =args.column_text
A =args.column_id
self.logger.info(f'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
A =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'''Loading dataset from {args.train_data}''' )
A =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
A =None
if args.validation_data:
self.logger.info(f'''Loading validation dataset from {args.validation_data}''' )
A =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
A =args.validation_split
A =args.train_batch_size
A =args.valid_batch_size
A =args.learning_rate
A =args.adam_epsilon
def _a ( self : Union[str, Any] ):
"""simple docstring"""
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def _a ( self : List[Any] ):
"""simple docstring"""
raise NotImplementedError
def _a ( self : Optional[Any] ):
"""simple docstring"""
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
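# Illustrative CLI invocation (file paths are placeholders; column indices and
# defaults follow the arguments registered above):
# transformers-cli train \
#     --train_data ./train.csv --column_label 0 --column_text 1 \
#     --validation_split 0.1 --model bert-base-uncased --output ./trained_model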
| 689 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCamelCase__:
"""simple docstring"""
_A = 42
_A = None
_A = None
__a = namedtuple("""CoinsDistribResult""", """moves excess""")
def UpperCamelCase_ ( a_ ) ->int:
if root is None:
return 0
# Validation
def count_nodes(a_ ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(a_ ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(a_ ) != count_coins(a_ ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(a_ ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
A , A =get_distrib(node.left )
A , A =get_distrib(node.right )
A =1 - left_distrib_excess
A =1 - right_distrib_excess
A =(
left_distrib_moves
+ right_distrib_moves
+ abs(a_ )
+ abs(a_ )
)
A =node.data - coins_to_left - coins_to_right
return CoinsDistribResult(a_ , a_ )
return get_distrib(a_ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
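# Worked sketch, assuming the upstream names `Node(data, left, right)` and
# `distribute_coins` for the obfuscated dataclass and entry point above:
# root = Node(3, Node(0), Node(0))   # 3 coins at the root, two empty leaves
# distribute_coins(root) == 2        # one move to each child balances the tree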
| 689 | 1 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__a = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class UpperCamelCase__( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , snake_case__ : List[str] ):
"""simple docstring"""
super().__init__()
A =torchvision.models.resnetaaa(pretrained=snake_case__ )
A =list(model.children() )[:-2]
A =nn.Sequential(*snake_case__ )
A =nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def _a ( self : Union[str, Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
A =self.pool(self.model(snake_case__ ) )
A =torch.flatten(snake_case__ , start_dim=2 )
A =out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : str , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Tuple ):
"""simple docstring"""
A =[json.loads(snake_case__ ) for l in open(snake_case__ )]
A =os.path.dirname(snake_case__ )
A =tokenizer
A =labels
A =len(snake_case__ )
A =max_seq_length
A =transforms
def __len__( self : Any ):
"""simple docstring"""
return len(self.data )
def __getitem__( self : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
A =torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=snake_case__ ) )
A , A , A =sentence[0], sentence[1:-1], sentence[-1]
A =sentence[: self.max_seq_length]
A =torch.zeros(self.n_classes )
A =1
A =Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
A =self.transforms(snake_case__ )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _a ( self : Dict ):
"""simple docstring"""
A =Counter()
for row in self.data:
label_freqs.update(row["label"] )
return label_freqs
def UpperCamelCase_ ( a_ ) ->List[str]:
A =[len(row["sentence"] ) for row in batch]
A , A =len(a_ ), max(a_ )
A =torch.zeros(a_ , a_ , dtype=torch.long )
A =torch.zeros(a_ , a_ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(a_ , a_ ) ):
A =input_row["sentence"]
A =1
A =torch.stack([row["image"] for row in batch] )
A =torch.stack([row["label"] for row in batch] )
A =torch.stack([row["image_start_token"] for row in batch] )
A =torch.stack([row["image_end_token"] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def UpperCamelCase_ ( ) ->Optional[int]:
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def UpperCamelCase_ ( ) ->Optional[Any]:
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
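# Minimal sketch of the pooling breakdown used by the image encoder above: with
# num_image_embeds == 4 the backbone features are pooled to a 2x2 grid, i.e.
# 4 image tokens of 2048 dims each (assuming a ResNet 7x7 feature map):
# pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[4])                  # -> (2, 2)
# out = torch.flatten(pool(torch.randn(1, 2048, 7, 7)), start_dim=2)
# tokens = out.transpose(1, 2).contiguous()                          # shape (1, 4, 2048)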
| 689 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {"""vocab_file""": """vocab.txt"""}
__a = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
__a = {
"""openbmb/cpm-ant-10b""": 1_0_2_4,
}
def UpperCamelCase_ ( a_ ) ->List[Any]:
A =collections.OrderedDict()
with open(a_ , "r" , encoding="utf-8" ) as reader:
A =reader.readlines()
for index, token in enumerate(a_ ):
A =token.rstrip("\n" )
A =index
return vocab
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Tuple , snake_case__ : int , snake_case__ : int="<unk>" , snake_case__ : Optional[Any]=2_00 ):
"""simple docstring"""
A =vocab
A =unk_token
A =max_input_chars_per_word
def _a ( self : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
A =list(snake_case__ )
if len(snake_case__ ) > self.max_input_chars_per_word:
return [self.unk_token]
A =0
A =[]
while start < len(snake_case__ ):
A =len(snake_case__ )
A =None
while start < end:
A ="".join(chars[start:end] )
if substr in self.vocab:
A =substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(snake_case__ )
A =end
return sub_tokens
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ["input_ids", "attention_mask"]
_A = False
def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="<d>" , snake_case__ : Optional[int]="</d>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="</n>" , snake_case__ : Any="</_>" , snake_case__ : List[str]="left" , **snake_case__ : Optional[int] , ):
"""simple docstring"""
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , )
A =bod_token
A =eod_token
A =load_vocab(snake_case__ )
A =self.encoder[space_token]
A =self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
A ={v: k for k, v in self.encoder.items()}
A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _a ( self : Dict ):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def _a ( self : Any ):
"""simple docstring"""
return self.encoder["\n"]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : Tuple , snake_case__ : int ):
"""simple docstring"""
A =[]
for x in jieba.cut(snake_case__ , cut_all=snake_case__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) )
return output_tokens
def _a ( self : List[Any] , snake_case__ : List[Any] , **snake_case__ : str ):
"""simple docstring"""
A =[i for i in token_ids if i >= 0]
A =[
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(snake_case__ , **snake_case__ )
def _a ( self : List[Any] , snake_case__ : int ):
"""simple docstring"""
return token in self.encoder
def _a ( self : Optional[Any] , snake_case__ : List[str] ):
"""simple docstring"""
return "".join(snake_case__ )
def _a ( self : List[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Dict , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if os.path.isdir(snake_case__ ):
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
A =(filename_prefix + "-" if filename_prefix else "") + save_directory
A =0
if " " in self.encoder:
A =self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
A =self.encoder["\n"]
del self.encoder["\n"]
A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
A =token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def _a ( self : Any , snake_case__ : List[int] , snake_case__ : List[int] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ ))
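# Minimal usage sketch (requires `jieba`; the tokenizer class is obfuscated above —
# upstream it is `CpmAntTokenizer`, per the "openbmb/cpm-ant-10b" vocab map):
# tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
# ids = tokenizer.encode("今天天气真好")  # jieba segmentation, then WordPiece lookup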
| 689 | 1 |
def UpperCamelCase_ ( a_ , a_ ) ->int:
return int((input_a, input_a).count(0 ) != 0 )
def UpperCamelCase_ ( ) ->None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 689 |
def UpperCamelCase_ ( a_ = 6008_5147_5143 ) ->int:
try:
A =int(a_ )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
A =2
A =0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
A =i
while n % i == 0:
A =n // i
i += 1
return int(a_ )
if __name__ == "__main__":
print(F'''{solution() = }''')
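# Sanity check (Project Euler #3): the default n = 600_851_475_143 factors as
# 71 * 839 * 1471 * 6857, so solution() returns its largest prime factor, 6857.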
| 689 | 1 |
import random
class UpperCamelCase__:
"""simple docstring"""
@staticmethod
def _a ( snake_case__ : str ):
"""simple docstring"""
A =[ord(snake_case__ ) for i in text]
A =[]
A =[]
for i in plain:
A =random.randint(1 , 3_00 )
A =(i + k) * k
cipher.append(snake_case__ )
key.append(snake_case__ )
return cipher, key
@staticmethod
def _a ( snake_case__ : list[int] , snake_case__ : list[int] ):
"""simple docstring"""
A =[]
for i in range(len(snake_case__ ) ):
A =int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(snake_case__ ) )
return "".join(snake_case__ )
if __name__ == "__main__":
__a , __a = Onepad().encrypt("""Hello""")
print(c, k)
print(Onepad().decrypt(c, k))
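# Round-trip property of the toy cipher above: c = (i + k) * k implies
# i = (c - k**2) / k exactly, so decryption inverts encryption:
# cipher, key = Onepad().encrypt("Hello")
# assert Onepad().decrypt(cipher, key) == "Hello"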
| 689 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "Wav2Vec2FeatureExtractor"
_A = "AutoTokenizer"
def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(snake_case__ , snake_case__ )
A =self.feature_extractor
A =False
@classmethod
def _a ( cls : List[str] , snake_case__ : Union[str, Any] , **snake_case__ : Dict ):
"""simple docstring"""
try:
return super().from_pretrained(snake_case__ , **snake_case__ )
except OSError:
warnings.warn(
f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , snake_case__ , )
A =WavaVecaFeatureExtractor.from_pretrained(snake_case__ , **snake_case__ )
A =WavaVecaCTCTokenizer.from_pretrained(snake_case__ , **snake_case__ )
return cls(feature_extractor=snake_case__ , tokenizer=snake_case__ )
def __call__( self : Optional[Any] , *snake_case__ : Union[str, Any] , **snake_case__ : Optional[int] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*snake_case__ , **snake_case__ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
A =kwargs.pop("raw_speech" )
else:
A =kwargs.pop("audio" , snake_case__ )
A =kwargs.pop("sampling_rate" , snake_case__ )
A =kwargs.pop("text" , snake_case__ )
if len(snake_case__ ) > 0:
A =args[0]
A =args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
A =self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
if text is not None:
A =self.tokenizer(snake_case__ , **snake_case__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A =encodings["input_ids"]
return inputs
def _a ( self : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*snake_case__ , **snake_case__ )
A =kwargs.pop("input_features" , snake_case__ )
A =kwargs.pop("labels" , snake_case__ )
if len(snake_case__ ) > 0:
A =args[0]
A =args[1:]
if input_features is not None:
A =self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ )
if labels is not None:
A =self.tokenizer.pad(snake_case__ , **snake_case__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A =labels["input_ids"]
return input_features
def _a ( self : List[str] , *snake_case__ : Dict , **snake_case__ : int ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def _a ( self : List[str] , *snake_case__ : Optional[int] , **snake_case__ : List[Any] ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@contextmanager
def _a ( self : int ):
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
A =True
A =self.tokenizer
yield
A =self.feature_extractor
A =False
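# Minimal usage sketch (checkpoint name is illustrative). Per the __call__ logic
# above, `audio` is routed to the feature extractor and `text` to the tokenizer:
# processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
# inputs = processor(audio=raw_speech, sampling_rate=16_000, return_tensors="pt")
# labels = processor(text="HELLO WORLD").input_ids  # preferred over as_target_processor()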
| 689 | 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def UpperCamelCase_ ( ) ->Dict:
A =ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
A =parser.add_subparsers(help="transformers-cli command helpers" )
# Register commands
ConvertCommand.register_subcommand(a_ )
DownloadCommand.register_subcommand(a_ )
EnvironmentCommand.register_subcommand(a_ )
RunCommand.register_subcommand(a_ )
ServeCommand.register_subcommand(a_ )
UserCommands.register_subcommand(a_ )
AddNewModelCommand.register_subcommand(a_ )
AddNewModelLikeCommand.register_subcommand(a_ )
LfsCommands.register_subcommand(a_ )
PTtoTFCommand.register_subcommand(a_ )
# Let's go
A =parser.parse_args()
if not hasattr(a_ , "func" ):
parser.print_help()
exit(1 )
# Run
A =args.func(a_ )
service.run()
if __name__ == "__main__":
main()
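# Example entry-point usage once installed as a console script (subcommands are
# the ones registered above; exact flags vary per command):
# transformers-cli env
# transformers-cli download bert-base-uncased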
| 689 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 689 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__a = get_tests_dir("""fixtures/dummy-config.json""")
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[Any] ):
"""simple docstring"""
A =0
def _a ( self : int ):
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def _a ( self : Optional[int] ):
"""simple docstring"""
A =AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(snake_case__ , snake_case__ )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =AutoConfig.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _a ( self : Dict ):
"""simple docstring"""
A =AutoConfig.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def _a ( self : Dict ):
"""simple docstring"""
A =AutoConfig.for_model("roberta" )
self.assertIsInstance(snake_case__ , snake_case__ )
def _a ( self : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
A =os.path.join(snake_case__ , "fake-roberta" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with open(os.path.join(snake_case__ , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
A =AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(type(snake_case__ ) , snake_case__ )
def _a ( self : List[str] ):
"""simple docstring"""
try:
AutoConfig.register("custom" , snake_case__ )
# Wrong model type will raise an error
with self.assertRaises(snake_case__ ):
AutoConfig.register("model" , snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoConfig.register("bert" , snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
A =CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case__ )
A =AutoConfig.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _a ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case__ , "bert-base is not a local folder and is not a valid model identifier" ):
A =AutoConfig.from_pretrained("bert-base" )
def _a ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case__ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
A =AutoConfig.from_pretrained(snake_case__ , revision="aaaaaa" )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case__ , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
A =AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def _a ( self : Optional[Any] ):
"""simple docstring"""
with self.assertRaises(snake_case__ ):
A =AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
A =AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=snake_case__ )
A =AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=snake_case__ )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case__ )
A =AutoConfig.from_pretrained(snake_case__ , trust_remote_code=snake_case__ )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "new-model"
try:
AutoConfig.register("new-model" , snake_case__ )
# If remote code is not set, the default is to use local
A =AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
A =AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=snake_case__ )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
A =AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=snake_case__ )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 689 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__a = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
__a = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
__a = """|""".join(sys.argv[1:])
__a = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__a = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 689 | 1 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case__ , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(snake_case__ , "num_attention_heads" ) )
self.parent.assertTrue(hasattr(snake_case__ , "num_encoder_blocks" ) )
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : List[str] , snake_case__ : str , snake_case__ : Optional[int]=13 , snake_case__ : str=64 , snake_case__ : Optional[int]=3 , snake_case__ : List[Any]=4 , snake_case__ : List[Any]=[2, 2, 2, 2] , snake_case__ : Union[str, Any]=[8, 4, 2, 1] , snake_case__ : Union[str, Any]=[16, 32, 64, 1_28] , snake_case__ : Dict=[1, 4, 8, 16] , snake_case__ : Dict=[1, 2, 4, 8] , snake_case__ : List[str]=True , snake_case__ : List[Any]=True , snake_case__ : Tuple="gelu" , snake_case__ : Any=0.1 , snake_case__ : int=0.1 , snake_case__ : str=0.02 , snake_case__ : List[Any]=3 , snake_case__ : Union[str, Any]=None , ):
"""simple docstring"""
A =parent
A =batch_size
A =image_size
A =num_channels
A =num_encoder_blocks
A =sr_ratios
A =depths
A =hidden_sizes
A =downsampling_rates
A =num_attention_heads
A =is_training
A =use_labels
A =hidden_act
A =hidden_dropout_prob
A =attention_probs_dropout_prob
A =initializer_range
A =num_labels
A =scope
def _a ( self : Optional[int] ):
"""simple docstring"""
A =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A =None
if self.use_labels:
A =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A =self.get_config()
return config, pixel_values, labels
def _a ( self : Optional[int] ):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _a ( self : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
A =SegformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
A =model(snake_case__ )
A =A =self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _a ( self : Optional[int] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
A =self.num_labels
A =SegformerForSemanticSegmentation(snake_case__ )
model.to(snake_case__ )
model.eval()
A =model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
A =model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _a ( self : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ):
"""simple docstring"""
A =1
A =SegformerForSemanticSegmentation(config=snake_case__ )
model.to(snake_case__ )
model.eval()
A =torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(snake_case__ )
A =model(snake_case__ , labels=snake_case__ )
self.parent.assertGreater(result.loss , 0.0 )
def _a ( self : Dict ):
"""simple docstring"""
A =self.prepare_config_and_inputs()
A , A , A =config_and_inputs
A ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
_A = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
_A = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A = True
_A = False
_A = False
_A = False
def _a ( self : List[Any] ):
"""simple docstring"""
A =SegformerModelTester(self )
A =SegformerConfigTester(self , config_class=snake_case__ )
def _a ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*snake_case__ )
def _a ( self : List[str] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*snake_case__ )
@unittest.skip("SegFormer does not use inputs_embeds" )
def _a ( self : int ):
"""simple docstring"""
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
def _a ( self : Any ):
"""simple docstring"""
pass
def _a ( self : Optional[Any] ):
"""simple docstring"""
A , A =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A =model_class(snake_case__ )
A =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A =[*signature.parameters.keys()]
A =["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A , A =self.model_tester.prepare_config_and_inputs_for_common()
A =True
for model_class in self.all_model_classes:
A =True
A =False
A =True
A =model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
A =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
A =outputs.attentions
A =sum(self.model_tester.depths )
self.assertEqual(len(snake_case__ ) , snake_case__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A =True
A =model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
A =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
A =outputs.attentions
self.assertEqual(len(snake_case__ ) , snake_case__ )
# verify the first attentions (first block, first layer)
A =(self.model_tester.image_size // 4) ** 2
A =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
A =(self.model_tester.image_size // 32) ** 2
A =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
A =len(snake_case__ )
# Check attention is always last and order is fine
A =True
A =True
A =model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
A =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
self.assertEqual(out_len + 1 , len(snake_case__ ) )
A =outputs.attentions
self.assertEqual(len(snake_case__ ) , snake_case__ )
# verify the first attentions (first block, first layer)
A =(self.model_tester.image_size // 4) ** 2
A =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _a ( self : Dict ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ : str , snake_case__ : List[Any] , snake_case__ : int ):
A =model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
A =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
A =outputs.hidden_states
A =self.model_tester.num_encoder_blocks
self.assertEqual(len(snake_case__ ) , snake_case__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
A , A =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A =True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A =True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def _a ( self : Dict ):
"""simple docstring"""
if not self.model_tester.is_training:
return
A , A =self.model_tester.prepare_config_and_inputs_for_common()
A =True
for model_class in self.all_model_classes:
if model_class in get_values(snake_case__ ):
continue
A =model_class(snake_case__ )
model.to(snake_case__ )
model.train()
A =self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
A =model(**snake_case__ ).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _a ( self : str ):
"""simple docstring"""
pass
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A =SegformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def UpperCamelCase_ ( ) ->Tuple:
A =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : str ):
"""simple docstring"""
A =SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=snake_case__ , align=snake_case__ , do_random_crop=snake_case__ )
A =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
snake_case__ )
A =prepare_img()
A =image_processor(images=snake_case__ , return_tensors="pt" )
A =encoded_inputs.pixel_values.to(snake_case__ )
with torch.no_grad():
A =model(snake_case__ )
A =torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , snake_case__ )
A =torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def _a ( self : List[Any] ):
"""simple docstring"""
A =SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=snake_case__ , align=snake_case__ , do_random_crop=snake_case__ )
A =SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(snake_case__ )
A =prepare_img()
A =image_processor(images=snake_case__ , return_tensors="pt" )
A =encoded_inputs.pixel_values.to(snake_case__ )
with torch.no_grad():
A =model(snake_case__ )
A =torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , snake_case__ )
A =torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , snake_case__ , atol=1E-1 ) )
@slow
def _a ( self : Dict ):
"""simple docstring"""
A =SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=snake_case__ , align=snake_case__ , do_random_crop=snake_case__ )
A =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
snake_case__ )
A =prepare_img()
A =image_processor(images=snake_case__ , return_tensors="pt" )
A =encoded_inputs.pixel_values.to(snake_case__ )
with torch.no_grad():
A =model(snake_case__ )
A =outputs.logits.detach().cpu()
A =image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_00, 3_00)] )
A =torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , snake_case__ )
A =image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
A =torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape , snake_case__ )
| 689 |
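# A quick numeric sketch of the attention-shape arithmetic asserted in the tests
# above: SegFormer stage i downsamples the image by 4 * 2**(i - 1), and its
# spatial-reduction attention shrinks the key/value sequence by sr_ratio**2.
# The image size and sr_ratios below are illustrative assumptions, not values
# read from a real config.
image_size, sr_ratios = 64, [8, 4, 2, 1]
seq_len_stage1 = (image_size // 4) ** 2                   # query length, stage 1
reduced_stage1 = (image_size // (4 * sr_ratios[0])) ** 2  # key/value length, stage 1
seq_len_stage4 = (image_size // 32) ** 2                  # query length, stage 4
print(seq_len_stage1, reduced_stage1, seq_len_stage4)     # 256 4 4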
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""MobileViTFeatureExtractor"""]
__a = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 | 1 |
import numpy as np
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : List[str] ):
"""simple docstring"""
A =(0, 0)
A =None
A =0
A =0
A =0
def __eq__( self : int , snake_case__ : List[Any] ):
"""simple docstring"""
return self.position == cell.position
def _a ( self : Optional[int] ):
"""simple docstring"""
print(self.position )
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : List[Any] , snake_case__ : List[str]=(5, 5) ):
"""simple docstring"""
A =np.zeros(snake_case__ )
A =world_size[0]
A =world_size[1]
def _a ( self : Any ):
"""simple docstring"""
print(self.w )
def _a ( self : Optional[int] , snake_case__ : List[Any] ):
"""simple docstring"""
A =[
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
A =cell.position[0]
A =cell.position[1]
A =[]
for n in neighbour_cord:
A =current_x + n[0]
A =current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
A =Cell()
A =(x, y)
A =cell
neighbours.append(snake_case__ )
return neighbours
def UpperCamelCase_ ( a_ , a_ , a_ ) ->str:
A =[]
A =[]
_open.append(a_ )
while _open:
A =np.argmin([n.f for n in _open] )
A =_open[min_f]
_closed.append(_open.pop(a_ ) )
if current == goal:
break
for n in world.get_neighbours(a_ ):
for c in _closed:
if c == n:
continue
A =current.g + 1
A , A =n.position
A , A =goal.position
A =(ya - ya) ** 2 + (xa - xa) ** 2
A =n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(a_ )
A =[]
while current.parent is not None:
path.append(current.position )
A =current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
__a = Gridworld()
# Start position and goal
__a = Cell()
__a = (0, 0)
__a = Cell()
__a = (4, 4)
print(F'''path from {start.position} to {goal.position}''')
__a = astar(world, start, goal)
# Just for visual reasons.
for i in s:
__a = 1
print(world.w)
| 689 |
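# A compact, runnable sketch of the grid A* idea above with conventional names.
# It keeps the snippet's squared-Euclidean heuristic and unit step cost; the
# 5x5 world size and the coordinates in the demo call are assumptions.
import heapq

def astar_grid(start, goal, width=5, height=5):
    moves = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]

    def h(p):
        return (p[0] - goal[0]) ** 2 + (p[1] - goal[1]) ** 2

    open_heap = [(h(start), start)]
    parents = {start: None}
    g = {start: 0}
    while open_heap:
        _, current = heapq.heappop(open_heap)
        if current == goal:
            path = []
            while current is not None:  # walk parent links back to the start
                path.append(current)
                current = parents[current]
            return path[::-1]
        for dx, dy in moves:
            nxt = (current[0] + dx, current[1] + dy)
            if not (0 <= nxt[0] < width and 0 <= nxt[1] < height):
                continue
            new_g = g[current] + 1
            if new_g < g.get(nxt, float("inf")):  # found a cheaper route to nxt
                g[nxt] = new_g
                parents[nxt] = current
                heapq.heappush(open_heap, (new_g + h(nxt), nxt))
    return []

print(astar_grid((0, 0), (4, 4)))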
def UpperCamelCase_ ( a_ , a_ ) ->int:
return int((input_a, input_a).count(0 ) != 0 )
def UpperCamelCase_ ( ) ->None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 689 | 1 |
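# NAND is functionally complete, so the gate above is enough to build NOT, AND
# and OR. A short sketch with assumed helper names, verified by truth tables.
def nand(input_a: int, input_b: int) -> int:
    return int((input_a, input_b).count(0) != 0)

def not_gate(a: int) -> int:
    return nand(a, a)

def and_gate(a: int, b: int) -> int:
    return not_gate(nand(a, b))

def or_gate(a: int, b: int) -> int:
    return nand(not_gate(a), not_gate(b))

assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]
assert [or_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 1, 1, 1]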
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Optional[int] , *snake_case__ : List[Any] , **snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__(*snake_case__ , **snake_case__ )
A ={}
def _a ( self : List[Any] , snake_case__ : Any , *snake_case__ : Dict , **snake_case__ : int ):
"""simple docstring"""
A =super().add_tokens(snake_case__ , *snake_case__ , **snake_case__ )
if num_added_tokens == 0:
raise ValueError(
f'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
" `placeholder_token` that is not already in the tokenizer." )
def _a ( self : Any , snake_case__ : Union[str, Any] , *snake_case__ : List[str] , snake_case__ : List[str]=1 , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
A =[]
if num_vec_per_token == 1:
self.try_adding_tokens(snake_case__ , *snake_case__ , **snake_case__ )
output.append(snake_case__ )
else:
A =[]
for i in range(snake_case__ ):
A =placeholder_token + f'''_{i}'''
self.try_adding_tokens(snake_case__ , *snake_case__ , **snake_case__ )
output.append(snake_case__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f'''The tokenizer already has placeholder token {token} that can get confused with'''
f''' {placeholder_token}; keep placeholder tokens independent.''' )
A =output
def _a ( self : Any , snake_case__ : str , snake_case__ : Optional[int]=False , snake_case__ : str=1.0 ):
"""simple docstring"""
if isinstance(snake_case__ , snake_case__ ):
A =[]
for i in range(len(snake_case__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=snake_case__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
A =self.token_map[placeholder_token]
A =tokens[: 1 + int(len(snake_case__ ) * prop_tokens_to_load )]
if vector_shuffle:
A =copy.copy(snake_case__ )
random.shuffle(snake_case__ )
A =text.replace(snake_case__ , " ".join(snake_case__ ) )
return text
def __call__( self : List[Any] , snake_case__ : Tuple , *snake_case__ : Any , snake_case__ : Any=False , snake_case__ : Dict=1.0 , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
snake_case__ , vector_shuffle=snake_case__ , prop_tokens_to_load=snake_case__ ) , *snake_case__ , **snake_case__ , )
def _a ( self : int , snake_case__ : Any , *snake_case__ : List[str] , snake_case__ : List[str]=False , snake_case__ : List[Any]=1.0 , **snake_case__ : List[Any] ):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
snake_case__ , vector_shuffle=snake_case__ , prop_tokens_to_load=snake_case__ ) , *snake_case__ , **snake_case__ , )
| 689 |
def UpperCamelCase_ ( a_ , a_ , a_ ) ->int:
def count_of_possible_combinations(a_ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(a_ )
def UpperCamelCase_ ( a_ , a_ , a_ ) ->int:
def count_of_possible_combinations_with_dp_array(
a_ , a_ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
A =sum(
count_of_possible_combinations_with_dp_array(target - item , a_ )
for item in array )
A =answer
return answer
A =[-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(a_ , a_ )
def UpperCamelCase_ ( a_ , a_ , a_ ) ->int:
A =[0] * (target + 1)
A =1
for i in range(1 , target + 1 ):
for j in range(a_ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
__a = 3
__a = 5
__a = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 689 | 1 |
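# The bottom-up variant from the snippet above, de-obfuscated. dp[t] counts
# ordered selections summing to t, so permutations of the same items count
# separately (hence 9 rather than 4 for the example below).
def combination_sum_iv_sketch(array, target):
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to reach 0: pick nothing
    for t in range(1, target + 1):
        for item in array:
            if t - item >= 0:
                dp[t] += dp[t - item]
    return dp[target]

assert combination_sum_iv_sketch([1, 2, 5], 5) == 9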
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "char"
_A = "bpe"
_A = "wp"
__a = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = ["image_processor", "char_tokenizer"]
_A = "ViTImageProcessor"
_A = "MgpstrTokenizer"
def __init__( self : Dict , snake_case__ : Optional[int]=None , snake_case__ : Optional[int]=None , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
A =None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , snake_case__ , )
A =kwargs.pop("feature_extractor" )
A =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
A =tokenizer
A =AutoTokenizer.from_pretrained("gpt2" )
A =AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(snake_case__ , snake_case__ )
def __call__( self : Dict , snake_case__ : List[Any]=None , snake_case__ : int=None , snake_case__ : Union[str, Any]=None , **snake_case__ : List[Any] ):
"""simple docstring"""
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
A =self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None:
A =self.char_tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
A =encodings["input_ids"]
return inputs
def _a ( self : Dict , snake_case__ : Union[str, Any] ):
"""simple docstring"""
A , A , A =sequences
A =char_preds.size(0 )
A , A =self._decode_helper(snake_case__ , "char" )
A , A =self._decode_helper(snake_case__ , "bpe" )
A , A =self._decode_helper(snake_case__ , "wp" )
A =[]
A =[]
for i in range(snake_case__ ):
A =[char_scores[i], bpe_scores[i], wp_scores[i]]
A =[char_strs[i], bpe_strs[i], wp_strs[i]]
A =scores.index(max(snake_case__ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
A ={}
A =final_strs
A =final_scores
A =char_strs
A =bpe_strs
A =wp_strs
return out
def _a ( self : int , snake_case__ : Tuple , snake_case__ : int ):
"""simple docstring"""
if format == DecodeType.CHARACTER:
A =self.char_decode
A =1
A ="[s]"
elif format == DecodeType.BPE:
A =self.bpe_decode
A =2
A ="#"
elif format == DecodeType.WORDPIECE:
A =self.wp_decode
A =1_02
A ="[SEP]"
else:
raise ValueError(f'''Format {format} is not supported.''' )
A , A =[], []
A =pred_logits.size(0 )
A =pred_logits.size(1 )
A , A =pred_logits.topk(1 , dim=-1 , largest=snake_case__ , sorted=snake_case__ )
A =preds_index.view(-1 , snake_case__ )[:, 1:]
A =decoder(snake_case__ )
A , A =torch.nn.functional.softmax(snake_case__ , dim=2 ).max(dim=2 )
A =preds_max_prob[:, 1:]
for index in range(snake_case__ ):
A =preds_str[index].find(snake_case__ )
A =preds_str[index][:pred_eos]
A =preds_index[index].cpu().tolist()
A =pred_index.index(snake_case__ ) if eos_token in pred_index else -1
A =preds_max_prob[index][: pred_eos_index + 1]
A =pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(snake_case__ )
conf_scores.append(snake_case__ )
return dec_strs, conf_scores
def _a ( self : Any , snake_case__ : Any ):
"""simple docstring"""
A =[seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(snake_case__ )]
return decode_strs
def _a ( self : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
return self.bpe_tokenizer.batch_decode(snake_case__ )
def _a ( self : Optional[Any] , snake_case__ : int ):
"""simple docstring"""
A =[seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(snake_case__ )]
return decode_strs
| 689 |
from __future__ import annotations
import math
def UpperCamelCase_ ( a_ , a_ ) ->float:
A =u
for i in range(1 , a_ ):
A =temp * (u - i)
return temp
def UpperCamelCase_ ( ) ->None:
A =int(input("enter the numbers of values: " ) )
A =[]
for _ in range(a_ ):
y.append([] )
for i in range(a_ ):
for j in range(a_ ):
y[i].append(a_ )
A =0
print("enter the values of parameters in a list: " )
A =list(map(a_ , input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(a_ ):
A =float(input() )
A =int(input("enter the value to interpolate: " ) )
A =(value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , a_ ):
for j in range(n - i ):
A =y[j + 1][i - 1] - y[j][i - 1]
A =y[0][0]
for i in range(1 , a_ ):
summ += (ucal(a_ , a_ ) * y[0][i]) / math.factorial(a_ )
print(f'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
| 689 | 1 |
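# The same forward-difference interpolation as a pure function, so it can be
# checked without stdin; the sample points below are assumptions. It requires
# equally spaced x values, like the interactive version above.
from math import factorial

def newton_forward(x, y, value):
    n = len(x)
    diff = [[yi] + [0.0] * (n - 1) for yi in y]  # forward difference table
    for j in range(1, n):
        for i in range(n - j):
            diff[i][j] = diff[i + 1][j - 1] - diff[i][j - 1]
    u = (value - x[0]) / (x[1] - x[0])
    total, u_term = diff[0][0], 1.0
    for j in range(1, n):
        u_term *= u - (j - 1)  # u(u-1)...(u-j+1)
        total += u_term * diff[0][j] / factorial(j)
    return total

print(newton_forward([0, 1, 2, 3], [0, 1, 4, 9], 1.5))  # 2.25 for f(x) = x**2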
from __future__ import annotations
import os
from typing import Any
import requests
__a = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__a = BASE_URL + """/user"""
# https://github.com/settings/tokens
__a = os.environ.get("""USER_TOKEN""", """""")
def UpperCamelCase_ ( a_ ) ->dict[Any, Any]:
A ={
"Authorization": f'''token {auth_token}''',
"Accept": "application/vnd.github.v3+json",
}
return requests.get(a_ , headers=a_ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
| 689 |
from cva import destroyAllWindows, imread, imshow, waitKey
def UpperCamelCase_ ( a_ ) ->Any:
# get the image height and width
A , A =img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(a_ ):
for j in range(a_ ):
A =[255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
__a = imread("""image_data/lena.jpg""", 1)
# convert to its negative
__a = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 689 | 1 |
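# The per-pixel loop above can be a single vectorized NumPy expression, which
# is simpler and far faster on large images; a tiny stand-in array is used
# here instead of lena.jpg.
import numpy as np

img = np.array([[[10, 200, 30], [0, 0, 0]]], dtype=np.uint8)
negative = 255 - img
print(negative)  # pixel [10, 200, 30] -> [245, 55, 225]; black -> white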
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def __lt__( self : List[str] , snake_case__ : List[str] ):
"""simple docstring"""
return self[-1] < other[-1]
def __eq__( self : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
return self[-1] == other[-1]
def UpperCamelCase_ ( a_ ) ->list:
A =[]
# sort into stacks
for element in collection:
A =Stack([element] )
A =bisect_left(a_ , a_ )
if i != len(a_ ):
stacks[i].append(a_ )
else:
stacks.append(a_ )
# use a heap-based merge to merge the stacks efficiently
A =merge(*(reversed(a_ ) for stack in stacks) )
return collection
if __name__ == "__main__":
__a = input("""Enter numbers separated by a comma:\n""").strip()
__a = [int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
| 689 |
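# The same patience sort with conventional names: each element is dealt onto
# the leftmost stack whose top is >= it (found by binary search over the stack
# tops, which stay sorted), then the stacks, each a non-increasing run, are
# reversed and heap-merged.
from bisect import bisect_left
from heapq import merge

def patience_sort_sketch(collection):
    stacks = []
    for element in collection:
        i = bisect_left([s[-1] for s in stacks], element)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append([element])
    return list(merge(*(reversed(s) for s in stacks)))

assert patience_sort_sketch([1, 9, 5, 21, 17, 6]) == [1, 5, 6, 9, 17, 21]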
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
__a = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
__a = {
"""ctrl""": 2_5_6,
}
__a = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def UpperCamelCase_ ( a_ ) ->List[str]:
A =set()
A =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A =char
A =set(a_ )
return pairs
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = CONTROL_CODES
def __init__( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[int]="<unk>" , **snake_case__ : List[str] ):
"""simple docstring"""
super().__init__(unk_token=snake_case__ , **snake_case__ )
with open(snake_case__ , encoding="utf-8" ) as vocab_handle:
A =json.load(snake_case__ )
A ={v: k for k, v in self.encoder.items()}
with open(snake_case__ , encoding="utf-8" ) as merges_handle:
A =merges_handle.read().split("\n" )[1:-1]
A =[tuple(merge.split() ) for merge in merges]
A =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
A ={}
@property
def _a ( self : str ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : List[Any] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : int , snake_case__ : Any ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
A =tuple(snake_case__ )
A =tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
A =get_pairs(snake_case__ )
if not pairs:
return token
while True:
A =min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
A , A =bigram
A =[]
A =0
while i < len(snake_case__ ):
try:
A =word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A =j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A =tuple(snake_case__ )
A =new_word
if len(snake_case__ ) == 1:
break
else:
A =get_pairs(snake_case__ )
A ="@@ ".join(snake_case__ )
A =word[:-4]
A =word
return word
def _a ( self : List[str] , snake_case__ : int ):
"""simple docstring"""
A =[]
A =re.findall(R"\S+\n?" , snake_case__ )
for token in words:
split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) )
return split_tokens
def _a ( self : List[str] , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
def _a ( self : Optional[int] , snake_case__ : Any ):
"""simple docstring"""
A =" ".join(snake_case__ ).replace("@@ " , "" ).strip()
return out_string
def _a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(snake_case__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" )
A =0
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case__ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
A =token_index
writer.write(" ".join(snake_case__ ) + "\n" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 689 | 1 |
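# The BPE merge loop above is driven by symbol-pair extraction; shown
# standalone on a plain tuple to make the step concrete (the tokenizer then
# merges the lowest-ranked pair and repeats).
def get_pairs_sketch(word):
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

print(sorted(get_pairs_sketch(("l", "o", "w", "</w>"))))
# [('l', 'o'), ('o', 'w'), ('w', '</w>')]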
from __future__ import annotations
__a = """#"""
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : List[Any] ):
"""simple docstring"""
A ={}
def _a ( self : Tuple , snake_case__ : str ):
"""simple docstring"""
A =self._trie
for char in text:
if char not in trie:
A ={}
A =trie[char]
A =True
def _a ( self : Optional[Any] , snake_case__ : str ):
"""simple docstring"""
A =self._trie
for char in prefix:
if char in trie:
A =trie[char]
else:
return []
return self._elements(snake_case__ )
def _a ( self : Union[str, Any] , snake_case__ : dict ):
"""simple docstring"""
A =[]
for c, v in d.items():
A =[" "] if c == END else [(c + s) for s in self._elements(snake_case__ )]
result.extend(snake_case__ )
return tuple(snake_case__ )
__a = Trie()
__a = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def UpperCamelCase_ ( a_ ) ->tuple:
A =trie.find_word(a_ )
return tuple(string + word for word in suffixes )
def UpperCamelCase_ ( ) ->None:
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 689 |
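# The same autocomplete idea with nested plain dicts and conventional names,
# runnable end to end; the word list mirrors the driver above.
END_MARK = "#"

def trie_insert(trie, word):
    node = trie
    for ch in word:
        node = node.setdefault(ch, {})
    node[END_MARK] = True  # mark end of a complete word

def trie_complete(trie, prefix):
    node = trie
    for ch in prefix:
        if ch not in node:
            return []
        node = node[ch]
    out = []

    def walk(n, acc):
        for k, v in n.items():
            if k == END_MARK:
                out.append(prefix + acc)
            else:
                walk(v, acc + k)

    walk(node, "")
    return out

trie = {}
for w in ("depart", "detergent", "daring", "dog", "deer", "deal"):
    trie_insert(trie, w)
print(sorted(trie_complete(trie, "de")))  # ['deal', 'deer', 'depart', 'detergent']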
def UpperCamelCase_ ( a_ , a_ ) ->list[int]:
A =int(a_ )
# Initialize Result
A =[]
# Traverse through all denomination
for denomination in reversed(a_ ):
# Find denominations
while int(a_ ) >= int(a_ ):
total_value -= int(a_ )
answer.append(a_ ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
__a = []
__a = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
__a = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
__a = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
__a = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
__a = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'''Following is minimal change for {value}: ''')
__a = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 689 | 1 |
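# The greedy loop above with conventional names. Greedy change-making is only
# optimal for canonical coin systems such as the Indian denominations used in
# the driver; for arbitrary systems a DP approach is needed.
def find_minimum_change_sketch(denominations, value):
    answer = []
    for denomination in sorted(denominations, reverse=True):
        while value >= denomination:
            value -= denomination
            answer.append(denomination)
    return answer

print(find_minimum_change_sketch([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987))
# [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]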
def UpperCamelCase_ ( a_ , a_ ) ->List[str]:
A =0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def UpperCamelCase_ ( a_ , a_ , a_ ) ->Optional[Any]:
A =0
while b > 0:
if b & 1:
A =((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 689 |
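# A worked check of the shift-and-add multiplication above: each iteration
# consumes one bit of b while a doubles, so res accumulates a * (every set bit
# of b), i.e. binary long multiplication. The modular variant only adds a
# reduction mod c at each step to keep intermediates small.
def binary_multiply(a, b):
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a   # a becomes a * 2**k after k iterations
        b >>= 1  # consume one bit of b
    return res

assert binary_multiply(13, 11) == 143
assert binary_multiply(7, 0) == 0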
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = ["""model.decoder.embed_positions.weights"""]
def UpperCamelCase_ ( a_ ) ->List[str]:
if "emb" in name:
A =name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
A =name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
A =name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
A =name.replace("linear1" , "fc1" )
if "linear2" in name:
A =name.replace("linear2" , "fc2" )
if "norm1" in name:
A =name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
A =name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
A =name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
A =name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
A =name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
A =name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def UpperCamelCase_ ( a_ , a_ ) ->Tuple[Dict, Dict]:
A =list(state_dict.keys() )
A ={}
for key in keys:
A =state_dict.pop(a_ )
A =rename_keys(a_ )
if "in_proj_weight" in key:
# split fused qkv proj
A =val[:hidden_size, :]
A =val[hidden_size : 2 * hidden_size, :]
A =val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
A =val
else:
A =val
return state_dict, enc_dec_proj_state_dict
def UpperCamelCase_ ( a_ ) ->MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
A =1024
A =24
A =16
elif checkpoint == "medium":
A =1536
A =48
A =24
elif checkpoint == "large":
A =2048
A =48
A =32
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
A =MusicgenDecoderConfig(
hidden_size=a_ , ffn_dim=hidden_size * 4 , num_hidden_layers=a_ , num_attention_heads=a_ , )
return config
@torch.no_grad()
def UpperCamelCase_ ( a_ , a_=None , a_=None , a_="cpu" ) ->Union[str, Any]:
A =MusicGen.get_pretrained(a_ , device=a_ )
A =decoder_config_from_checkpoint(a_ )
A =fairseq_model.lm.state_dict()
A , A =rename_state_dict(
a_ , hidden_size=decoder_config.hidden_size )
A =TaEncoderModel.from_pretrained("t5-base" )
A =EncodecModel.from_pretrained("facebook/encodec_32khz" )
A =MusicgenForCausalLM(a_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
A , A =decoder.load_state_dict(a_ , strict=a_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(a_ )
if len(a_ ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(a_ ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
A =MusicgenForConditionalGeneration(text_encoder=a_ , audio_encoder=a_ , decoder=a_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(a_ )
# check we can do a forward pass
A =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
A =input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
A =model(input_ids=a_ , decoder_input_ids=a_ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
A =AutoTokenizer.from_pretrained("t5-base" )
A =AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
A =MusicgenProcessor(feature_extractor=a_ , tokenizer=a_ )
# set the appropriate bos/pad token ids
A =2048
A =2048
# set other default generation config params
A =int(30 * audio_encoder.config.frame_rate )
A =True
A =3.0
if pytorch_dump_folder is not None:
Path(a_ ).mkdir(exist_ok=a_ )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(a_ )
processor.save_pretrained(a_ )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(a_ )
processor.push_to_hub(a_ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
__a = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 689 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCamelCase_ ( a_ ) ->Tuple:
A =FileLock(str(tmpdir / "foo.lock" ) )
A =FileLock(str(tmpdir / "foo.lock" ) )
A =0.01
with locka.acquire():
with pytest.raises(a_ ):
A =time.time()
locka.acquire(a_ )
assert time.time() - _start > timeout
def UpperCamelCase_ ( a_ ) ->List[Any]:
A ="a" * 1000 + ".lock"
A =FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(a_ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
A =FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(a_ ):
locka.acquire(0 )
| 689 | 1 |
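# A minimal sketch of the FileLock API exercised by the tests above: a
# blocking acquire with a timeout around a critical section. The lock path is
# illustrative.
from datasets.utils.filelock import FileLock, Timeout

lock = FileLock("resource.txt.lock")
try:
    with lock.acquire(timeout=0.05):
        pass  # critical section: only one process gets here at a time
except Timeout:
    print("another process is holding the lock")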
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__a = logging.get_logger(__name__)
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : int , *snake_case__ : Tuple , **snake_case__ : Any ):
"""simple docstring"""
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
| 689 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 | 1 |
import os
import sys
import unittest
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__a = os.path.join(git_repo_path, """src""", """transformers""")
__a = """
{0} = None
"""
__a = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
__a = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Tuple ):
"""simple docstring"""
A =find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" )
self.assertIsNone(snake_case__ )
A =find_backend(" if not is_tokenizers_available():" )
self.assertEqual(snake_case__ , "tokenizers" )
A =find_backend(" if not is_tensorflow_text_available():" )
self.assertEqual(snake_case__ , "tensorflow_text" )
A =find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" )
self.assertEqual(snake_case__ , "sentencepiece_and_tokenizers" )
A =find_backend(
" if not (is_sentencepiece_available() and is_tensorflow_text_available()):" )
self.assertEqual(snake_case__ , "sentencepiece_and_tensorflow_text" )
A =find_backend(
" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" )
self.assertEqual(snake_case__ , "sentencepiece_and_tokenizers_and_vision" )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A =read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , snake_case__ )
self.assertIn("tensorflow_text" , snake_case__ )
self.assertIn("sentencepiece_and_tokenizers" , snake_case__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("BertModel" , objects["torch"] )
self.assertIn("TFBertModel" , objects["tf"] )
self.assertIn("FlaxBertModel" , objects["flax"] )
self.assertIn("BertModel" , objects["torch"] )
self.assertIn("TFBertTokenizer" , objects["tensorflow_text"] )
self.assertIn("convert_slow_tokenizer" , objects["sentencepiece_and_tokenizers"] )
def _a ( self : List[str] ):
"""simple docstring"""
A =create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(snake_case__ , "\nCONSTANT = None\n" )
A =create_dummy_object("function" , "'torch'" )
self.assertEqual(
snake_case__ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
A ="\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n"
A =create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(snake_case__ , snake_case__ )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A ="# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n"
A =create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , snake_case__ )
| 689 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__a = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__a = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
__a = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
"""simple docstring"""
def _a ( self : Any ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
def _a ( self : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : int = CHRF.CHAR_ORDER , snake_case__ : int = CHRF.WORD_ORDER , snake_case__ : int = CHRF.BETA , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , ):
"""simple docstring"""
A =len(references[0] )
if any(len(snake_case__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
A =[[refs[i] for refs in references] for i in range(snake_case__ )]
A =CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
A =sb_chrf.corpus_score(snake_case__ , snake_case__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 689 | 1 |
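# The metric above delegates to sacrebleu, so the same score can be reproduced
# directly (sketch, assuming sacrebleu>=1.4.12 is installed). Note sacrebleu
# expects references grouped per reference stream, i.e. the transposed layout
# the metric builds internally.
from sacrebleu import CHRF

hypotheses = ["The relationship between cats and dogs is not exactly friendly."]
ref_stream = ["The relationship between dogs and cats is not exactly friendly."]
chrf = CHRF(word_order=2)  # word_order=2 gives chrF++
print(chrf.corpus_score(hypotheses, [ref_stream]).score)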
import sys
__a = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def UpperCamelCase_ ( a_ ) ->int:
A =1
for digit in s:
product *= int(a_ )
return product
def UpperCamelCase_ ( a_ = N ) ->int:
A =-sys.maxsize - 1
A =n[:13]
A =13
while cur_index < len(a_ ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
A =substr[1:] + n[cur_index]
cur_index += 1
else:
A =max(a_ , str_eval(a_ ) )
A =n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
| 689 |
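# An equivalent sliding-window formulation of the largest-product search. It
# rescans windows containing zeros instead of skipping past them, but it is
# much shorter and easy to verify on a small input.
from math import prod

def solution_windows(digits, span=13):
    return max(
        prod(int(d) for d in digits[i : i + span])
        for i in range(len(digits) - span + 1)
    )

assert solution_windows("3675356291", span=5) == 3150  # window "67535": 6*7*5*3*5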
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
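# With the lazy module installed in sys.modules, a downstream import such as
# `from transformers import GPTBigCodeConfig` only materializes the relevant
# submodule on first attribute access; the torch-backed modeling classes load
# the same way when torch is available (a sketch, not part of this file):
#
#   from transformers import GPTBigCodeConfig, GPTBigCodeModel
#   model = GPTBigCodeModel(GPTBigCodeConfig())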
| 689 | 1 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Re-split ``x`` into sentences joined by newlines (used to make ROUGE-Lsum scores match published rougeL numbers)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
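# Example (assumes the punkt data downloaded above):
#
#   add_newline_to_end_of_each_sentence("First sentence. Second one.")
#   # -> "First sentence.\nSecond one."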
| 689 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
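    # For reference: `ids_tensor` and `random_attention_mask` above yield
    # (batch_size, seq_length) tensors for input_ids, input_mask, token_type_ids
    # and token_labels, and (batch_size,) tensors for the remaining label inputs.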
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"langs": token_type_ids,
"lengths": input_lengths,
}
return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
@slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 689 | 1 |
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
A ={"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A, model_name="microsoft/deberta-v2-xlarge", revision="ad6e42c1532ddf3a15c39246b63f5559d558b670"
        )
| 689 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """
    Create a state space tree and traverse it depth-first: each level fixes one
    position, and the recursion stops once every element of the sequence is used.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
__a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
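# For comparison, the standard library enumerates the same orderings without an
# explicit state-space tree (a sketch; it yields tuples rather than lists):
from itertools import permutations

for permutation in permutations(sequence_a):
    print(list(permutation))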
| 689 | 1 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement tanh(x) through its logistic-sigmoid form: tanh(x) = 2 * sigmoid(2x) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
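# Cross-check against NumPy's built-in: tanh(x) == 2 * sigmoid(2x) - 1 exactly,
# so the two implementations agree to floating-point tolerance:
#
#   >>> xs = np.linspace(-3.0, 3.0, 7)
#   >>> bool(np.allclose(tangent_hyperbolic(xs), np.tanh(xs)))
#   True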
| 689 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model
@property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10,
        )
        return model
@property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet
@slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
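    # To listen to a generated sample (a sketch; assumes `scipy` is installed and
    # the checkpoint's default 22050 Hz sample rate):
    #
    #   from scipy.io.wavfile import write
    #   write("sample.wav", 22050, audio.transpose())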
| 689 | 1 |