"""Convert ConvNeXt + UperNet checkpoints from mmsegmentation to the Hugging Face format."""

import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))

        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
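
# A quick, self-contained illustration of the pop-and-reinsert pattern that
# rename_key above relies on: the old entry is removed and stored under the new
# name, so after all renames the state dict holds exactly the target key set.
# The toy keys below are illustrative, not real checkpoint keys.
toy_state_dict = {"backbone.norm0.weight": 1.0, "decode_head.conv_seg.bias": 2.0}
rename_key(toy_state_dict, "backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight")
print(toy_state_dict)
# {'decode_head.conv_seg.bias': 2.0, 'backbone.hidden_states_norms.stage1.weight': 1.0}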
"""A radix tree (compressed prefix trie) implementation."""


class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word.

        Returns (common substring, remaining prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1

        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the node if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
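
# A small worked example of RadixNode.match, the helper the whole structure
# hinges on: it returns (common prefix, leftover node prefix, leftover word),
# which is how insert/find/delete decide whether to split a node or descend.
node = RadixNode(prefix="banana")
print(node.match("bandana"))  # ('ban', 'ana', 'dana') -> node must be split
print(node.match("banana"))   # ('banana', '', '')     -> exact match
print(node.match("bananas"))  # ('banana', '', 's')    -> descend with 's'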
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
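
# A minimal, simplified sketch of the mechanism behind the dummy classes above
# (the names requires_backends_demo / DummyObjectDemo / FlaxThing are
# hypothetical stand-ins, not the library's own): requires_backends raises
# when the backend is missing, and the metaclass makes even class-level
# attribute access fail loudly, so importing the library without flax works
# until a flax object is actually used.
import importlib.util


def requires_backends_demo(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    for backend in backends:
        if importlib.util.find_spec(backend) is None:
            raise ImportError(f"{name} requires the {backend} library but it was not found.")


class DummyObjectDemo(type):
    def __getattr__(cls, key):
        # Triggered for any missing class attribute, e.g. FlaxThing.from_pretrained
        requires_backends_demo(cls, cls._backends)


class FlaxThing(metaclass=DummyObjectDemo):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends_demo(self, ["flax"])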
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


torch.backends.cuda.matmul.allow_tf32 = False


class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
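
# The fast tests above reseed an identical torch.Generator before each of the
# two pipeline calls so the return_dict=True and return_dict=False paths can be
# compared element-wise. A minimal sketch of the same idea, assuming any
# function that consumes a torch.Generator:
import torch


def noisy_step(generator):
    return torch.randn(3, generator=generator)


a = noisy_step(torch.Generator().manual_seed(0))
b = noisy_step(torch.Generator().manual_seed(0))
assert torch.equal(a, b)  # identical seeds -> identical draws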
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
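
# set_scheduler above swaps the k-diffusion sampler by name at runtime rather
# than by constructing a scheduler object. A minimal sketch of the call
# pattern, assuming network access and using only the model id and sampler
# names exercised by the tests above:
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
pipe.set_scheduler("sample_dpmpp_2m")  # or "sample_euler", as in the tests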
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform max pooling on a square 2D matrix (image)."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform average pooling on a square 2D matrix (image)."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
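
# A quick worked example of the two pooling functions above on a 4x4 matrix
# with size=2 and stride=2, so the output shape is (4 - 2) // 2 + 1 = 2.
# Note avgpooling truncates each window average to an int (e.g. 3.5 -> 3).
arr = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
print(maxpooling(arr, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
print(avgpooling(arr, size=2, stride=2))  # [[ 3.  5.] [11. 13.]]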
""" Conversion script for the LDM checkpoints. """

import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
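
# A tiny sketch of the prefix remapping used in the weights branch above: only
# the first dotted component of each key is renamed, the rest is kept. The toy
# keys below are illustrative, not real checkpoint keys.
key_map = {"mid": "mid_block", "time_steps": "time_proj"}


def remap(param_key):
    head, *rest = param_key.split(".")
    return ".".join([key_map.get(head, head)] + rest)


print(remap("mid.resnets.0.conv1.weight"))  # mid_block.resnets.0.conv1.weight
print(remap("conv_in.weight"))              # conv_in.weight (unchanged)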
from __future__ import annotations


class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
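
# A short usage sketch for the Matrix class above. A unimodular matrix
# (determinant 1) is used so the int-truncating scalar multiply in __mul__
# does not distort the inverse.
m = Matrix([[2, 3], [1, 2]])
print(m.determinant())                   # 2*2 - 3*1 = 1
print(m * m.inverse() == m.identity())   # True
print(m ** 2)                            # [[7. 12.] [4. 7.]]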
"""Ideal gas law: PV = nRT."""

UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
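
# Quick check against PV = nRT in SI units: for n = 2 mol at T = 300 K in
# V = 0.05 m^3, P = nRT / V = 2 * 8.314462 * 300 / 0.05 ≈ 99773.5 Pa.
print(pressure_of_gas_system(2, 300, 0.05))    # ~99773.54
print(volume_of_gas_system(2, 300, 99773.54))  # ~0.05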
import tempfile

import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
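
# A minimal sketch (class and input names are hypothetical) of how a concrete
# test case would combine the mixin above with unittest, supplying the pieces
# the mixin expects: pipeline_class, get_dummy_components and get_dummy_inputs.
import unittest

from diffusers import IFPipeline


class IFPipelineFastTests(IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device):
        generator = torch.Generator(device="cpu").manual_seed(0)
        return {
            "prompt": "a photo of a cat",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    def test_save_load_local(self):
        self._test_save_load_local()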
"""Tokenization class for Blenderbot."""

import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
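
# bytes_to_unicode above builds a reversible byte <-> printable-character map
# so byte-level BPE never has to emit raw whitespace or control bytes. A quick
# probe of it (assuming the function above):
b2u = bytes_to_unicode()
print(len(b2u))       # 256: every byte value is covered
print(b2u[ord(" ")])  # 'Ġ' -- the space byte maps to a printable stand-in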
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[Any] , ):
UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
UpperCamelCase__ : Any = json.load(UpperCAmelCase_)
UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ : Any = errors # how to handle errors in decoding
UpperCamelCase__ : Tuple = bytes_to_unicode()
UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle:
UpperCamelCase__ : List[Any] = merges_handle.read().split('\n')[1:-1]
UpperCamelCase__ : List[Any] = [tuple(merge.split()) for merge in bpe_merges]
UpperCamelCase__ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Dict = {}
UpperCamelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase__ : Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple):
return len(self.encoder)
def __UpperCamelCase ( self : Tuple):
return dict(self.encoder , **self.added_tokens_encoder)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]):
if token in self.cache:
return self.cache[token]
UpperCamelCase__ : Optional[int] = tuple(UpperCAmelCase_)
UpperCamelCase__ : int = get_pairs(UpperCAmelCase_)
if not pairs:
return token
while True:
UpperCamelCase__ : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCAmelCase_ , float('inf')))
if bigram not in self.bpe_ranks:
break
UpperCamelCase__, UpperCamelCase__ : Tuple = bigram
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Optional[int] = 0
while i < len(UpperCAmelCase_):
try:
UpperCamelCase__ : Tuple = word.index(UpperCAmelCase_ , UpperCAmelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCamelCase__ : Any = j
if word[i] == first and i < len(UpperCAmelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCamelCase__ : List[str] = tuple(UpperCAmelCase_)
UpperCamelCase__ : Dict = new_word
if len(UpperCAmelCase_) == 1:
break
else:
UpperCamelCase__ : Optional[int] = get_pairs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = word
return word
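    # Illustrative sketch (hypothetical merge table): if self.bpe_ranks contained
    # ('l', 'o') and ('lo', 'w'), self.bpe('low') would merge 'l'+'o' first and then
    # 'lo'+'w', returning 'low'; a token none of whose adjacent pairs appear in
    # self.bpe_ranks is returned as its space-joined characters.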
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any):
UpperCamelCase__ : Optional[Any] = []
for token in re.findall(self.pat , UpperCAmelCase_):
UpperCamelCase__ : Optional[int] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' '))
return bpe_tokens
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int]):
return self.decoder.get(UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int):
UpperCamelCase__ : int = ''.join(UpperCAmelCase_)
UpperCamelCase__ : Any = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
if not os.path.isdir(UpperCAmelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase__ : str = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n')
UpperCamelCase__ : str = 0
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!')
UpperCamelCase__ : List[Any] = token_index
writer.write(' '.join(UpperCAmelCase_) + '\n')
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_)) + [1]
return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1]
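    # Illustrative example (not in the original source): for a single two-token
    # sequence [10, 11] and no second sequence, this returns [1, 0, 0, 1] --
    # special tokens are marked with 1 and sequence tokens with 0.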
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : Any = [self.sep_token_id]
UpperCamelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_) > 0 and not text[0].isspace()):
UpperCamelCase__ : str = ' ' + text
return (text, kwargs)
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"):
UpperCamelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix the text with a space, as Blenderbot does internally
inputs.append(' ' + text)
else:
                # Generated responses already contain the leading space.
inputs.append(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : int = self.encode(UpperCAmelCase_)
if len(UpperCAmelCase_) > self.model_max_length:
UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
return input_ids
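# Minimal usage sketch (illustrative; assumes network access and that this class is
# exposed as transformers.BlenderbotTokenizer, per the checkpoint URLs above):
#
#     from transformers import BlenderbotTokenizer
#     tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')
#     tokenizer('Hello world')['input_ids']  # ends with tokenizer.eos_token_id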
| 6 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase__ = 3
def __UpperCAmelCase ( lowerCamelCase_) -> int:
print('Generating primitive root of p')
while True:
UpperCamelCase__ : Any = random.randrange(3 , lowerCamelCase_)
if pow(lowerCamelCase_ , 2 , lowerCamelCase_) == 1:
continue
if pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) == 1:
continue
return g
def __UpperCAmelCase ( lowerCamelCase_) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print('Generating prime p...')
UpperCamelCase__ : List[str] = rabin_miller.generate_large_prime(lowerCamelCase_) # select large prime number.
UpperCamelCase__ : Any = primitive_root(lowerCamelCase_) # one primitive root on modulo p.
    UpperCamelCase__ : Union[str, Any] = random.randrange(3 , lowerCamelCase_) # private key -> must be greater than 2 for safety.
UpperCamelCase__ : Dict = cryptomath.find_mod_inverse(pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) , lowerCamelCase_)
UpperCamelCase__ : List[Any] = (key_size, e_a, e_a, p)
UpperCamelCase__ : Optional[Any] = (key_size, d)
return public_key, private_key
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> None:
if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
print('\nWARNING:')
print(
f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'Use a different name or delete these files and re-run this program.')
sys.exit()
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = generate_key(lowerCamelCase_)
print(f'\nWriting public key to file {name}_pubkey.txt...')
with open(f'{name}_pubkey.txt' , 'w') as fo:
fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
print(f'Writing private key to file {name}_privkey.txt...')
with open(f'{name}_privkey.txt' , 'w') as fo:
fo.write(f'{private_key[0]},{private_key[1]}')
def __UpperCAmelCase ( ) -> None:
print('Making key files...')
make_key_files('elgamal' , 2_048)
print('Key files generation successful')
if __name__ == "__main__":
main()
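# Sanity-check sketch (not part of the original module): e_2 is computed as the
# modular inverse of pow(e_1, d, p), so the generated keys satisfy
# (e_1 ** d) * e_2 == 1 (mod p). For example:
#
#     public_key, private_key = generate_key(2_048)
#     _, e_1, e_2, p = public_key
#     _, d = private_key
#     assert pow(e_1, d, p) * e_2 % p == 1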
| 6 | 1 |
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[str]:
UpperCamelCase__ : List[Any] = AutoConfig.from_pretrained(lowerCamelCase_)
UpperCamelCase__ : Tuple = FlaxAutoModelForSeqaSeqLM.from_config(config=lowerCamelCase_)
UpperCamelCase__ : Union[str, Any] = checkpoints.load_tax_checkpoint(lowerCamelCase_)
UpperCamelCase__ : str = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
UpperCamelCase__ : Union[str, Any] = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
UpperCamelCase__ : Dict = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase__ : Dict = 'TransientGlobalSelfAttention'
else:
        raise ValueError(
            'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'
            ' attribute with a value from [\'local\', \'transient-global\'].')
# Encoder
for layer_index in range(config.num_layers):
UpperCamelCase__ : int = f'layers_{str(lowerCamelCase_)}'
# Self-Attention
UpperCamelCase__ : Any = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
UpperCamelCase__ : Optional[int] = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
UpperCamelCase__ : Optional[int] = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
UpperCamelCase__ : Optional[Any] = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase__ : Optional[int] = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
UpperCamelCase__ : int = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
UpperCamelCase__ : int = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
UpperCamelCase__ : Dict = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
UpperCamelCase__ : Union[str, Any] = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
UpperCamelCase__ : List[str] = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
UpperCamelCase__ : List[Any] = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
UpperCamelCase__ : List[Any] = flax_model.params['encoder']['block'][str(lowerCamelCase_)]['layer']
UpperCamelCase__ : Tuple = tax_attention_key
UpperCamelCase__ : Optional[Any] = tax_attention_out
UpperCamelCase__ : Tuple = tax_attention_query
UpperCamelCase__ : Union[str, Any] = tax_attention_value
UpperCamelCase__ : str = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase__ : Union[str, Any] = tax_global_layer_norm
if split_mlp_wi:
UpperCamelCase__ : int = tax_mlp_wi_a
UpperCamelCase__ : int = tax_mlp_wi_a
else:
UpperCamelCase__ : str = tax_mlp_wi
UpperCamelCase__ : Tuple = tax_mlp_wo
UpperCamelCase__ : Union[str, Any] = tax_mlp_layer_norm
UpperCamelCase__ : Any = flax_model_encoder_layer_block
# Only for layer 0:
UpperCamelCase__ : str = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
UpperCamelCase__ : Tuple = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase__ : Union[str, Any] = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
UpperCamelCase__ : Optional[int] = tax_encoder_global_rel_embedding
# Assigning
UpperCamelCase__ : Dict = tax_model['target']['encoder']['encoder_norm']['scale']
UpperCamelCase__ : str = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
UpperCamelCase__ : List[Any] = f'layers_{str(lowerCamelCase_)}'
# Self-Attention
UpperCamelCase__ : int = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
UpperCamelCase__ : Optional[int] = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
UpperCamelCase__ : Any = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
UpperCamelCase__ : Tuple = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
UpperCamelCase__ : Optional[Any] = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
UpperCamelCase__ : Optional[Any] = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
UpperCamelCase__ : List[Any] = tax_enc_dec_attention_module['key']['kernel']
UpperCamelCase__ : str = tax_enc_dec_attention_module['out']['kernel']
UpperCamelCase__ : List[Any] = tax_enc_dec_attention_module['query']['kernel']
UpperCamelCase__ : Union[str, Any] = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
UpperCamelCase__ : List[Any] = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
UpperCamelCase__ : Union[str, Any] = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
UpperCamelCase__ : int = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
UpperCamelCase__ : List[Any] = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
UpperCamelCase__ : str = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
UpperCamelCase__ : Any = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
UpperCamelCase__ : List[str] = flax_model.params['decoder']['block'][str(lowerCamelCase_)]['layer']
UpperCamelCase__ : List[Any] = tax_attention_key
UpperCamelCase__ : int = tax_attention_out
UpperCamelCase__ : Tuple = tax_attention_query
UpperCamelCase__ : Dict = tax_attention_value
UpperCamelCase__ : int = tax_pre_attention_layer_norm
UpperCamelCase__ : Optional[int] = tax_enc_dec_attention_key
UpperCamelCase__ : List[Any] = tax_enc_dec_attention_out
UpperCamelCase__ : int = tax_enc_dec_attention_query
UpperCamelCase__ : Dict = tax_enc_dec_attention_value
UpperCamelCase__ : Tuple = tax_cross_layer_norm
if split_mlp_wi:
UpperCamelCase__ : int = tax_mlp_wi_a
UpperCamelCase__ : Dict = tax_mlp_wi_a
else:
UpperCamelCase__ : int = tax_mlp_wi
UpperCamelCase__ : Any = tax_mlp_wo
UpperCamelCase__ : Any = txa_mlp_layer_norm
UpperCamelCase__ : Tuple = flax_model_decoder_layer_block
# Decoder Normalization
UpperCamelCase__ : Optional[int] = tax_model['target']['decoder']['decoder_norm']['scale']
UpperCamelCase__ : Optional[int] = txa_decoder_norm
# Only for layer 0:
UpperCamelCase__ : Any = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
UpperCamelCase__ : List[Any] = tax_decoder_rel_embedding
# Token Embeddings
UpperCamelCase__ : Dict = tax_model['target']['token_embedder']['embedding']
UpperCamelCase__ : int = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
UpperCamelCase__ : Union[str, Any] = tax_model['target']['decoder']['logits_dense']['kernel']
flax_model.save_pretrained(lowerCamelCase_)
    print('T5X Model was successfully converted!')
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
lowerCAmelCase__ = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
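# Example invocation (illustrative; the script name and paths are hypothetical, the
# flags match the argparse definitions above):
#
#     python convert_t5x_checkpoint_to_flax.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --config_name google/t5-v1_1-small \
#         --flax_dump_folder_path /path/to/flax_dump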
| 6 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
lowerCAmelCase__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str:
for attribute in key.split('.'):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCamelCase__ : str = 'lm_head'
UpperCamelCase__ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_)
if weight_type is not None:
UpperCamelCase__ : List[Any] = getattr(lowerCamelCase_ , lowerCamelCase_).shape
else:
UpperCamelCase__ : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
UpperCamelCase__ : Optional[Any] = value
elif weight_type == "weight_g":
UpperCamelCase__ : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCamelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCamelCase__ : Any = value
else:
UpperCamelCase__ : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : int = fairseq_model.state_dict()
UpperCamelCase__ : int = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase__ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase__ : List[Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
UpperCamelCase__ : Any = True
if "*" in mapped_key:
UpperCamelCase__ : Any = name.split(lowerCamelCase_)[0].split('.')[-2]
UpperCamelCase__ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase_)
if "weight_g" in name:
UpperCamelCase__ : int = 'weight_g'
elif "weight_v" in name:
UpperCamelCase__ : Any = 'weight_v'
elif "bias" in name:
UpperCamelCase__ : Union[str, Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase__ : Any = 'weight'
else:
UpperCamelCase__ : Tuple = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
continue
if not is_used:
unused_weights.append(lowerCamelCase_)
logger.warning(f'Unused weights: {unused_weights}')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Dict = full_name.split('conv_layers.')[-1]
UpperCamelCase__ : List[Any] = name.split('.')
UpperCamelCase__ : Any = int(items[0])
UpperCamelCase__ : int = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
UpperCamelCase__ : Tuple = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
UpperCamelCase__ : int = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
UpperCamelCase__ : Optional[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
UpperCamelCase__ : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(lowerCamelCase_)
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True) -> Tuple:
if config_path is not None:
UpperCamelCase__ : Optional[Any] = UniSpeechConfig.from_pretrained(lowerCamelCase_)
else:
UpperCamelCase__ : int = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCamelCase__ : Union[str, Any] = Dictionary.load_from_json(lowerCamelCase_)
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase__ : List[Any] = target_dict.pad_index
UpperCamelCase__ : Dict = target_dict.bos_index
UpperCamelCase__ : Union[str, Any] = target_dict.eos_index
UpperCamelCase__ : Tuple = len(target_dict.symbols)
UpperCamelCase__ : Dict = os.path.join(lowerCamelCase_ , 'vocab.json')
if not os.path.isdir(lowerCamelCase_):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_))
return
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_)
UpperCamelCase__ : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase__ : Any = 42
UpperCamelCase__ : List[str] = 43
with open(lowerCamelCase_ , 'w' , encoding='utf-8') as vocab_handle:
json.dump(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Optional[int] = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , )
UpperCamelCase__ : Optional[Any] = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
UpperCamelCase__ : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_)
processor.save_pretrained(lowerCamelCase_)
UpperCamelCase__ : Dict = UniSpeechForCTC(lowerCamelCase_)
else:
UpperCamelCase__ : List[Any] = UniSpeechForPreTraining(lowerCamelCase_)
if is_finetuned:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
UpperCamelCase__ : int = model[0].eval()
recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
hf_unispeech.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
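# Example invocation (illustrative; the script name and paths are hypothetical, the
# flags match the argparse definitions above):
#
#     python convert_unispeech_checkpoint.py \
#         --checkpoint_path /path/to/fairseq.pt \
#         --dict_path /path/to/dict.ltr.txt \
#         --pytorch_dump_folder_path /path/to/hf_dump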
| 6 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( lowerCamelCase_=None) -> Any:
if subparsers is not None:
UpperCamelCase__ : Optional[Any] = subparsers.add_parser('test')
else:
UpperCamelCase__ : Any = argparse.ArgumentParser('Accelerate test command')
parser.add_argument(
'--config_file' , default=lowerCamelCase_ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCamelCase_)
return parser
def __UpperCAmelCase ( lowerCamelCase_) -> List[Any]:
UpperCamelCase__ : Dict = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['test_utils', 'scripts', 'test_script.py'])
if args.config_file is None:
UpperCamelCase__ : str = script_name
else:
UpperCamelCase__ : Tuple = f'--config_file={args.config_file} {script_name}'
UpperCamelCase__ : Tuple = ['accelerate-launch'] + test_args.split()
UpperCamelCase__ : Dict = execute_subprocess_async(lowerCamelCase_ , env=os.environ.copy())
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!')
def __UpperCAmelCase ( ) -> Dict:
UpperCamelCase__ : Tuple = test_command_parser()
UpperCamelCase__ : Any = parser.parse_args()
test_command(lowerCamelCase_)
if __name__ == "__main__":
main()
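# This parser backs the `accelerate test` CLI subcommand; an equivalent direct run
# (config path is hypothetical):
#
#     accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml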
| 6 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[str]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Union[str, Any] = 1
UpperCamelCase__ : Union[str, Any] = 3
UpperCamelCase__ : Dict = (32, 32)
UpperCamelCase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCAmelCase_)
return image
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCamelCase ( self : str):
torch.manual_seed(0)
UpperCamelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[Any]):
def extract(*UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict):
class __lowercase :
def __init__( self : List[Any]):
UpperCamelCase__ : Optional[Any] = torch.ones([0])
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int):
self.pixel_values.to(UpperCAmelCase_)
return self
return Out()
return extract
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Any = self.dummy_cond_unet
UpperCamelCase__ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
UpperCamelCase__ : List[str] = self.dummy_vae
UpperCamelCase__ : str = self.dummy_text_encoder
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Tuple = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : int = self.dummy_cond_unet
UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.dummy_vae
UpperCamelCase__ : Optional[int] = self.dummy_text_encoder
UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Dict = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert isinstance(pipe.scheduler , UpperCAmelCase_)
assert pipe.safety_checker is None
UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Dict = self.dummy_cond_unet
UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Any = self.dummy_vae
UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder
UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# put models in fp16
UpperCamelCase__ : Any = unet.half()
UpperCamelCase__ : Tuple = vae.half()
UpperCamelCase__ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
UpperCamelCase__ : Any = 4_003_660_346
UpperCamelCase__ : Any = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : str = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
# without safety guidance (strong configuration)
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : str = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity'
UpperCamelCase__ : Tuple = 2_734_971_755
UpperCamelCase__ : Tuple = 7
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : int = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : int = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : int = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
UpperCamelCase__ : Any = 1_044_355_234
UpperCamelCase__ : Optional[int] = 12
UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : List[str] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
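# Note (summarizing the tests above): each nightly test renders the same prompt and
# seed twice -- once with sld_guidance_scale=0 (safety guidance disabled) and once
# with a strong safe-latent-diffusion configuration -- and checks both outputs
# against reference slices.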
| 6 | 1 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCAmelCase__ = 'src/diffusers'
lowerCAmelCase__ = '.'
# This is to make sure the diffusers module imported is the one in the repo.
lowerCAmelCase__ = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowerCAmelCase__ = spec.loader.load_module()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
return line.startswith(lowerCamelCase_) or len(lowerCamelCase_) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , lowerCamelCase_) is not None
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
UpperCamelCase__ : Dict = object_name.split('.')
UpperCamelCase__ : Any = 0
# First let's find the module where our object lives.
UpperCamelCase__ : str = parts[i]
while i < len(lowerCamelCase_) and not os.path.isfile(os.path.join(lowerCamelCase_ , f'{module}.py')):
i += 1
if i < len(lowerCamelCase_):
UpperCamelCase__ : Optional[int] = os.path.join(lowerCamelCase_ , parts[i])
if i >= len(lowerCamelCase_):
raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.')
with open(os.path.join(lowerCamelCase_ , f'{module}.py') , 'r' , encoding='utf-8' , newline='\n') as f:
UpperCamelCase__ : List[str] = f.readlines()
# Now let's find the class / func in the code!
UpperCamelCase__ : int = ''
UpperCamelCase__ : Optional[int] = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowerCamelCase_) and re.search(Rf'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index]) is None
):
line_index += 1
        indent += "    "
line_index += 1
if line_index >= len(lowerCamelCase_):
raise ValueError(f' {object_name} does not match any function or class in {module}.')
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
UpperCamelCase__ : Tuple = line_index
while line_index < len(lowerCamelCase_) and _should_continue(lines[line_index] , lowerCamelCase_):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
UpperCamelCase__ : List[str] = lines[start_index:line_index]
return "".join(lowerCamelCase_)
lowerCAmelCase__ = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
lowerCAmelCase__ = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
lowerCAmelCase__ = re.compile(R'<FILL\s+[^>]*>')
def __UpperCAmelCase ( lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase__ : Dict = code.split('\n')
UpperCamelCase__ : List[Any] = 0
while idx < len(lowerCamelCase_) and len(lines[idx]) == 0:
idx += 1
if idx < len(lowerCamelCase_):
return re.search(R'^(\s*)\S' , lines[idx]).groups()[0]
return ""
def __UpperCAmelCase ( lowerCamelCase_) -> str:
UpperCamelCase__ : Dict = len(get_indent(lowerCamelCase_)) > 0
if has_indent:
UpperCamelCase__ : Dict = f'class Bla:\n{code}'
UpperCamelCase__ : Union[str, Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=lowerCamelCase_)
UpperCamelCase__ : Tuple = black.format_str(lowerCamelCase_ , mode=lowerCamelCase_)
UpperCamelCase__, UpperCamelCase__ : Any = style_docstrings_in_code(lowerCamelCase_)
return result[len('class Bla:\n') :] if has_indent else result
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_=False) -> Dict:
with open(lowerCamelCase_ , 'r' , encoding='utf-8' , newline='\n') as f:
UpperCamelCase__ : Optional[int] = f.readlines()
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Tuple = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lowerCamelCase_):
UpperCamelCase__ : Any = _re_copy_warning.search(lines[line_index])
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = search.groups()
UpperCamelCase__ : Optional[Any] = find_code_in_diffusers(lowerCamelCase_)
UpperCamelCase__ : Dict = get_indent(lowerCamelCase_)
UpperCamelCase__ : Optional[Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
UpperCamelCase__ : Optional[int] = theoretical_indent
UpperCamelCase__ : Tuple = start_index
        # Loop to check the observed code; stop when indentation diminishes or when we see an `# End copy` comment.
UpperCamelCase__ : List[Any] = True
while line_index < len(lowerCamelCase_) and should_continue:
line_index += 1
if line_index >= len(lowerCamelCase_):
break
UpperCamelCase__ : Dict = lines[line_index]
UpperCamelCase__ : List[str] = _should_continue(lowerCamelCase_ , lowerCamelCase_) and re.search(f'^{indent}# End copy' , lowerCamelCase_) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
UpperCamelCase__ : str = lines[start_index:line_index]
UpperCamelCase__ : Dict = ''.join(lowerCamelCase_)
# Remove any nested `Copied from` comments to avoid circular copies
UpperCamelCase__ : List[Any] = [line for line in theoretical_code.split('\n') if _re_copy_warning.search(lowerCamelCase_) is None]
UpperCamelCase__ : Dict = '\n'.join(lowerCamelCase_)
# Before comparing, use the `replace_pattern` on the original code.
if len(lowerCamelCase_) > 0:
UpperCamelCase__ : List[str] = replace_pattern.replace('with' , '').split(',')
UpperCamelCase__ : Optional[int] = [_re_replace_pattern.search(lowerCamelCase_) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = pattern.groups()
UpperCamelCase__ : Optional[Any] = re.sub(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
if option.strip() == "all-casing":
UpperCamelCase__ : Union[str, Any] = re.sub(obja.lower() , obja.lower() , lowerCamelCase_)
UpperCamelCase__ : Tuple = re.sub(obja.upper() , obja.upper() , lowerCamelCase_)
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
UpperCamelCase__ : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code)
UpperCamelCase__ : List[str] = theoretical_code[len(lines[start_index - 1]) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index])
if overwrite:
UpperCamelCase__ : int = lines[:start_index] + [theoretical_code] + lines[line_index:]
UpperCamelCase__ : str = start_index + 1
if overwrite and len(lowerCamelCase_) > 0:
# Warn the user a file has been modified.
print(f'Detected changes, rewriting {filename}.')
with open(lowerCamelCase_ , 'w' , encoding='utf-8' , newline='\n') as f:
f.writelines(lowerCamelCase_)
return diffs
def __UpperCAmelCase ( lowerCamelCase_ = False) -> Dict:
UpperCamelCase__ : List[Any] = glob.glob(os.path.join(lowerCamelCase_ , '**/*.py') , recursive=lowerCamelCase_)
UpperCamelCase__ : Tuple = []
for filename in all_files:
UpperCamelCase__ : int = is_copy_consistent(lowerCamelCase_ , lowerCamelCase_)
diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(lowerCamelCase_) > 0:
UpperCamelCase__ : int = '\n'.join(lowerCamelCase_)
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.')
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowerCAmelCase__ = parser.parse_args()
check_copies(args.fix_and_overwrite)
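# The directives this script enforces look like the following (illustrative; the
# target path is hypothetical), matching _re_copy_warning and _re_replace_pattern:
#
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#
# The optional "with A->B" suffix drives the per-object renaming applied before the
# copied code is compared against the original.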
| 6 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowerCAmelCase__ = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
lowerCAmelCase__ = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __UpperCAmelCase ( ) -> Union[str, Any]:
UpperCamelCase__ : Optional[Any] = (
list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1))
)
UpperCamelCase__ : List[Any] = bs[:]
UpperCamelCase__ : Optional[int] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase_)
cs.append(2**8 + n)
n += 1
UpperCamelCase__ : Union[str, Any] = [chr(lowerCamelCase_) for n in cs]
return dict(zip(lowerCamelCase_ , lowerCamelCase_))
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Any = set()
UpperCamelCase__ : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
UpperCamelCase__ : str = char
return pairs
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[Any] , ):
UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
UpperCamelCase__ : Any = json.load(UpperCAmelCase_)
UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ : Any = errors # how to handle errors in decoding
UpperCamelCase__ : Tuple = bytes_to_unicode()
UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle:
UpperCamelCase__ : List[Any] = merges_handle.read().split('\n')[1:-1]
UpperCamelCase__ : List[Any] = [tuple(merge.split()) for merge in bpe_merges]
UpperCamelCase__ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Dict = {}
UpperCamelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase__ : Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple):
return len(self.encoder)
def __UpperCamelCase ( self : Tuple):
return dict(self.encoder , **self.added_tokens_encoder)
    def bpe(self, token: str):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8'))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = ' '.join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
        return input_ids
| 6 | 1 |
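The bpe method above is the standard GPT-2-style byte-level merge loop: repeatedly merge the adjacent pair with the best (lowest) rank until no known merge applies. A self-contained sketch with a toy merge table makes the loop easy to trace; the table below is hypothetical, not the Blenderbot vocabulary.

def get_pairs(word):
    # Set of adjacent symbol pairs in a word, e.g. ('l', 'o'), ('o', 'w').
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

def bpe(token, bpe_ranks):
    word = tuple(token)
    pairs = get_pairs(word)
    while pairs:
        # Pick the mergeable pair with the lowest (best) rank.
        bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float('inf')))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        if len(word) == 1:
            break
        pairs = get_pairs(word)
    return ' '.join(word)

ranks = {('l', 'o'): 0, ('lo', 'w'): 1, ('e', 'r'): 2}  # hypothetical merges
print(bpe('lower', ranks))  # -> "low er"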
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase__ = 256
class SpectrogramDiffusionPipeline (DiffusionPipeline ):
    _optional_components = ['''melgan''']
    def __init__(self, notes_encoder: SpectrogramNotesEncoder, continuous_encoder: SpectrogramContEncoder, decoder: TaFilmDecoder, scheduler: DDPMScheduler, melgan: OnnxRuntimeModel if is_onnx_available() else Any, ):
super().__init__()
# From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
self.register_modules(
            notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask)
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask)
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps)
        return logits
@torch.no_grad()
    def __call__(self, input_tokens: List[List[int]], generator: Optional[torch.Generator] = None, num_inference_steps: int = 100, return_dict: bool = True, output_type: str = "numpy", callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                F' {type(callback_steps)}.')
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
        for i, encoder_input_tokens in enumerate(input_tokens):
if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True)
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device), continuous_inputs=encoder_continuous_inputs, continuous_mask=encoder_continuous_mask, )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape, generator=generator, device=self.device, dtype=self.decoder.dtype, )
# set step values
        self.scheduler.set_timesteps(num_inference_steps)
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks, input_tokens=x, noise_time=t / self.scheduler.config.num_train_timesteps, )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample
            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)
            logger.info('Generated segment', i)
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.')
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.')
if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel
if not return_dict:
return (output,)
        return AudioPipelineOutput(audios=output)
| 6 |
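The two scaling helpers above are exact inverses on the clipped range, which is what lets the pipeline shuttle mel features in and out of the diffusion model's [-1, 1] domain. A quick numeric round-trip makes the contract easy to verify; the sample values are arbitrary illustrations, not MELGAN data.

import math
import torch

min_value, max_value = math.log(1e-5), 4.0   # the pipeline's feature range
feats = torch.tensor([min_value, 0.0, max_value])

zero_one = (feats - min_value) / (max_value - min_value)  # -> [0, 1]
scaled = zero_one * 2.0 - 1.0                             # -> [-1, 1]

zero_one_back = (scaled + 1.0) / 2.0
restored = zero_one_back * (max_value - min_value) + min_value

assert torch.allclose(feats, restored)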
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    url = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div', class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 6 | 1 |
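Yahoo's quote page markup changes often, so the CSS class hard-coded above can silently stop matching. A hedged wrapper (the wrapper name is assumed, not part of the original script) keeps the demo from crashing when the lookup fails:

def safe_stock_price(symbol: str) -> str:
    try:
        return stock_price(symbol)
    except AttributeError:
        # soup.find(...) returned None because the page layout changed
        return 'N/A'

if __name__ == '__main__':
    print(safe_stock_price('AAPL'))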
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    # An automorphic number ends in itself when squared (e.g. 76**2 == 5776).
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 |
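The check above is the classic automorphic-number test: n is automorphic when n**2 ends in the digits of n. A quick demonstration:

for n in (5, 6, 25, 76, 376, 9):
    print(n, is_automorphic_number(n))
# 5, 6, 25, 76 and 376 are automorphic (25, 36, 625, 5776, 141376); 9 is not (81)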
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
model.to(UpperCAmelCase_)
from datasets import load_dataset
UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo')
UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB')
UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_)
UpperCamelCase__ : Tuple = outputs.logits
UpperCamelCase__ : str = torch.Size((1, 16))
self.assertEqual(logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Tuple = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
| 6 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
model.to(UpperCAmelCase_)
from datasets import load_dataset
UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo')
UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB')
UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_)
UpperCamelCase__ : Tuple = outputs.logits
UpperCamelCase__ : str = torch.Size((1, 16))
self.assertEqual(logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Tuple = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
| 6 |
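A hedged inference sketch mirroring the slow test above: DiT fine-tuned on RVL-CDIP predicts one of 16 document classes. The local file name is hypothetical; any RGB document scan works.

import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')

image = Image.open('scanned_page.png').convert('RGB')  # hypothetical input file
inputs = processor(image, return_tensors='pt')
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])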
'''simple docstring'''
import argparse
import struct
import unittest
class SHAaaa :
    def __init__(self, data: bytes):
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
        self.round_constants = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
    @staticmethod
    def preprocessing(data: bytes):
        padding = b'\x80' + (b'\x00' * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack('>Q', (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self):
# Convert into blocks of 64 bytes
        self.blocks = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data) , 64)
]
for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L', block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0X100_000_000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0X100_000_000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0X100_000_000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0X100_000_000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0X100_000_000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0X100_000_000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = ''.join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int):
        return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class SHAaaaHashTest (unittest.TestCase ):
    def test_match_hashes(self):
        import hashlib
        data = bytes('Test String', 'utf-8')
        self.assertEqual(SHAaaa(data).hash, hashlib.sha256(data).hexdigest())
def main() -> None:
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s', '--string', dest='input_string', default='Hello World!! Welcome to Cryptography', help='Hash the string', )
    parser.add_argument(
        '-f', '--file', dest='input_file', help='Hash contents of a file')
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8')
    print(SHAaaa(hash_input).hash)
if __name__ == "__main__":
main()
| 6 | 1 |
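A quick cross-check of the pure-Python implementation against hashlib; the empty-string digest is the well-known e3b0c442... constant, so a mismatch on any of these inputs points straight at the compression loop.

import hashlib

for message in (b'', b'abc', b'Test String'):
    assert SHAaaa(message).hash == hashlib.sha256(message).hexdigest()
print('all digests match')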
'''simple docstring'''
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']
def topological_sort(start, visited, sort):
    """Perform topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
| 6 |
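Note that the recursive routine above appends a vertex only after its neighbors, so for this graph it returns ['c', 'd', 'e', 'b', 'a'], a reverse topological order. An iterative alternative (Kahn's algorithm) over the same `vertices`/`edges` globals yields a forward order and avoids deep recursion; this variant is a sketch, not part of the original script.

from collections import deque

def kahn_topological_sort(vertices, edges):
    indegree = {v: 0 for v in vertices}
    for targets in edges.values():
        for t in targets:
            indegree[t] += 1
    queue = deque(v for v in vertices if indegree[v] == 0)
    order = []
    while queue:
        v = queue.popleft()
        order.append(v)
        for t in edges[v]:
            indegree[t] -= 1
            if indegree[t] == 0:
                queue.append(t)
    return order

print(kahn_topological_sort(vertices, edges))  # ['a', 'c', 'b', 'd', 'e']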
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase__ = 300 # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float, ) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive')
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive')
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive')
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration')
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration')
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 1 |
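Worked example, with typical silicon values assumed (not from the source): n_i ~ 1.5e10 cm^-3 and N_D = N_A = 1e17 cm^-3 at T = 300 K give V_bi = (kT/q) * ln(N_D * N_A / n_i**2) ~ 0.0259 * ln(4.44e13) ~ 0.81 V.

print(f'{builtin_voltage(1e17, 1e17, 1.5e10):.3f} V')  # ~0.812 V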
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_fast'] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bert'] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_bert'] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_tf'] = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_bert'] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 6 |
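What the _LazyModule registration above buys you: importing the package stays cheap because heavy submodules load only on first attribute access. A small demonstration, assuming transformers and torch are installed:

import importlib

bert = importlib.import_module('transformers.models.bert')
config_cls = bert.BertConfig   # pulls in only the configuration module
model_cls = bert.BertModel     # the torch modeling file is imported here, on demand
print(config_cls.__name__, model_cls.__name__)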
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError('Not supported')
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(start: List[int], end: List[int], dims: Sequence[int], start_edges: Optional[List[bool]] = None, end_edges: Optional[List[bool]] = None, ) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]
    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]
    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break
    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path)
    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]
    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]], ))
    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :], ))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
slices.extend(lower())
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper())
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
slices.extend(lower())
return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims, )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer: Callable, inputs: Dict[str, Any], chunk_size: int, no_batch_dims: int, low_mem: bool = False, _out: Any = None, _add_into_out: bool = False, ) -> Any:
    if not (len(inputs) > 0):
        raise ValueError('Must provide at least one input')
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])
    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t
    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims), )
        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)
        # Run the layer on the chunk
        output_chunk = layer(**chunks)
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]
            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('Not supported')
        i += chunk_size
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512, ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None
    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int):
        logging.info('Tuning chunk size...')
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4
        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable):
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent
    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int, ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can never be consistent
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size, )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 6 | 1 |
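A toy round-trip makes chunk_layer's contract concrete: splitting the flattened batch into chunks must reproduce the unchunked result exactly. The layer below is illustrative; real callers pass attention or transition modules.

import torch

def layer(x):
    return {'out': x * 2 + 1}

x = torch.randn(8, 16, 4)  # two batch dims (8, 16), feature dim 4
full = layer(x=x)['out']
chunked = chunk_layer(layer, {'x': x}, chunk_size=32, no_batch_dims=2)['out']
assert torch.allclose(full, chunked)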
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline (Pipeline ):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, 'vision')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs['text_inputs'] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**model_inputs, **text_inputs)
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'Unsupported framework: {self.framework}')
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 6 |
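Typical use of the pipeline class above goes through the pipeline() factory; each candidate label is scored against the image via the hypothesis template. The COCO image URL is the standard example used in the transformers docs.

from transformers import pipeline

classifier = pipeline('zero-shot-image-classification', model='openai/clip-vit-base-patch32')
preds = classifier(
    'http://images.cocodataset.org/val2017/000000039769.jpg',
    candidate_labels=['a photo of cats', 'a photo of a dog', 'a photo of a plane'],
)
print(preds[0])  # highest-scoring {'score': ..., 'label': ...} first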
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
UpperCamelCase__ : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Tuple):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_slow.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_)
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_fast.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : int = self.prepare_image_inputs()
UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Any = 'lower newer'
UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[int] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'lower newer'
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_):
processor()
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Union[str, Any] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'lower newer'
UpperCamelCase__ : Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 6 | 1 |
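A hedged sketch of the round-trip these tests exercise: one CLIPProcessor call tokenizes the text and preprocesses the image, and the model scores them jointly. The checkpoint and image are the standard CLIP docs example.

import requests
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
model = CLIPModel.from_pretrained('openai/clip-vit-base-patch32')

image = Image.open(requests.get(
    'http://images.cocodataset.org/val2017/000000039769.jpg', stream=True).raw)
inputs = processor(text=['a photo of two cats', 'a photo of a dog'],
                   images=image, return_tensors='pt', padding=True)
with torch.no_grad():
    logits_per_image = model(**inputs).logits_per_image
print(logits_per_image.softmax(dim=-1))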
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig (PretrainedConfig ):
    model_type = '''efficientnet'''
    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2_560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.0_01, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs, ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig (OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs(self):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])
    @property
    def atol_for_validation(self):
        return 1e-5
| 6 |
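The width_coefficient / depth_divisor pair is usually consumed by the round_filters rule from the EfficientNet paper when the blocks are built: widths are scaled, then snapped to a multiple of the divisor without shrinking by more than 10%. The helper below is illustrative, not part of the config class above.

def round_filters(num_channels: int, width_coefficient: float, depth_divisor: int = 8) -> int:
    num_channels *= width_coefficient
    new_dim = max(depth_divisor, int(num_channels + depth_divisor / 2) // depth_divisor * depth_divisor)
    # make sure that rounding does not shrink the width by more than 10%
    if new_dim < 0.9 * num_channels:
        new_dim += depth_divisor
    return int(new_dim)

print(round_filters(32, 2.0))  # 64
print(round_filters(32, 1.0))  # 32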
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
| 6 | 1 |
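Usage sketch: fire exposes convert()'s parameters as CLI flags, so a script holding this code (file name assumed) runs as `python fp16_convert.py pytorch_model.bin --save_path model.fp16.bin`. Programmatic use is equivalent; here a tiny state dict is round-tripped:

import torch

torch.save({'w': torch.ones(2, 2)}, 'tiny.bin')
convert('tiny.bin', save_path='tiny.fp16.bin')
assert torch.load('tiny.fp16.bin')['w'].dtype == torch.float16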
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests (PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
def __UpperCamelCase ( self : List[Any]):
torch.manual_seed(0)
UpperCamelCase__ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
UpperCamelCase__ : Dict = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0)
UpperCamelCase__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0)
UpperCamelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCamelCase__ : List[Any] = CLIPTextModel(UpperCAmelCase_)
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
UpperCamelCase__ : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=0):
if str(UpperCAmelCase_).startswith('mps'):
UpperCamelCase__ : Union[str, Any] = torch.manual_seed(UpperCAmelCase_)
else:
UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __UpperCamelCase ( self : Optional[Any]):
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Union[str, Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : str = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
UpperCamelCase__ : Optional[Any] = sag_pipe.to(UpperCAmelCase_)
sag_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Dict = '.'
UpperCamelCase__ : Any = torch.manual_seed(0)
UpperCamelCase__ : Dict = sag_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np')
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ : Optional[Any] = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[int] = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
UpperCamelCase__ : List[Any] = sag_pipe.to(UpperCAmelCase_)
sag_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Any = '.'
UpperCamelCase__ : List[Any] = torch.manual_seed(0)
UpperCamelCase__ : List[str] = sag_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np')
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ : List[Any] = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Dict = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
UpperCamelCase__ : List[str] = sag_pipe.to(UpperCAmelCase_)
sag_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[str] = '.'
UpperCamelCase__ : List[str] = torch.manual_seed(0)
UpperCamelCase__ : List[Any] = sag_pipe(
[prompt] , width=768 , height=512 , generator=UpperCAmelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
UpperCamelCase__ : List[Any] = output.images
assert image.shape == (1, 512, 768, 3)
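# Hedged usage sketch mirroring the slow tests above (prompt is illustrative):
#   pipe = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
#   image = pipe('a photo of an astronaut', guidance_scale=7.5, sag_scale=1.0).images[0]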
| 6 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''segformer'''
def __init__( self : Tuple , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Tuple=[2, 2, 2, 2] , UpperCAmelCase_ : List[str]=[8, 4, 2, 1] , UpperCAmelCase_ : Union[str, Any]=[32, 64, 160, 256] , UpperCAmelCase_ : Any=[7, 3, 3, 3] , UpperCAmelCase_ : Any=[4, 2, 2, 2] , UpperCAmelCase_ : Union[str, Any]=[1, 2, 5, 8] , UpperCAmelCase_ : Tuple=[4, 4, 4, 4] , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=1e-6 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[int]=255 , **UpperCAmelCase_ : Tuple , ):
super().__init__(**UpperCAmelCase_)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , UpperCAmelCase_ , )
UpperCamelCase__ : List[Any] = num_channels
UpperCamelCase__ : Any = num_encoder_blocks
UpperCamelCase__ : Dict = depths
UpperCamelCase__ : int = sr_ratios
UpperCamelCase__ : str = hidden_sizes
UpperCamelCase__ : List[str] = patch_sizes
UpperCamelCase__ : Optional[int] = strides
UpperCamelCase__ : Dict = mlp_ratios
UpperCamelCase__ : List[str] = num_attention_heads
UpperCamelCase__ : int = hidden_act
UpperCamelCase__ : Any = hidden_dropout_prob
UpperCamelCase__ : str = attention_probs_dropout_prob
UpperCamelCase__ : List[str] = classifier_dropout_prob
UpperCamelCase__ : List[Any] = initializer_range
UpperCamelCase__ : Union[str, Any] = drop_path_rate
UpperCamelCase__ : int = layer_norm_eps
UpperCamelCase__ : Dict = decoder_hidden_size
UpperCamelCase__ : List[Any] = kwargs.get('reshape_last_stage' , UpperCAmelCase_)
UpperCamelCase__ : List[str] = semantic_loss_ignore_index
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self : Optional[Any]):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __UpperCamelCase ( self : Optional[Any]):
return 1e-4
@property
def __UpperCamelCase ( self : Any):
return 12
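# Hedged usage sketch (the original, un-mangled class name is assumed to be
# SegformerConfig):
#   config = SegformerConfig()   # defaults above describe the smallest (b0) variant
#   config.num_encoder_blocks    # -> 4
#   config.hidden_sizes          # -> [32, 64, 160, 256]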
| 6 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class __lowercase :
def __init__( self : int , UpperCAmelCase_ : int):
UpperCamelCase__ : int = value
UpperCamelCase__ : Node | None = None
UpperCamelCase__ : Node | None = None
class __lowercase :
def __init__( self : int , UpperCAmelCase_ : Node):
UpperCamelCase__ : Optional[int] = tree
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Node | None):
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left) + self.depth_first_search(node.right)
)
def __iter__( self : str):
yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
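# Self-contained sketch of the same traversal (the names below are assumptions,
# since identifiers in this dump are mangled): the iterator yields one value,
# the sum of every node, computed by recursive depth-first search.
class _Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: "_Node | None" = None
        self.right: "_Node | None" = None

def _dfs_sum(node: "_Node | None") -> int:
    # value of this node plus the sums of both subtrees
    return 0 if node is None else node.value + _dfs_sum(node.left) + _dfs_sum(node.right)

_root = _Node(10)
_root.left = _Node(5)
_root.right = _Node(-3)
assert _dfs_sum(_root) == 12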
| 6 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> list[str]:
return [sentence[i : i + ngram_size] for i in range(len(lowerCamelCase_) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
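# Self-contained sketch of the helper above ("create_ngram" is an assumed name,
# since identifiers in this dump are mangled): character-level n-grams are just
# every contiguous slice of length ngram_size.
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]

assert create_ngram("abcde", 3) == ["abc", "bcd", "cde"]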
| 6 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''vit_mae'''
def __init__( self : List[Any] , UpperCAmelCase_ : int=768 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Dict=3_072 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : str=1e-12 , UpperCAmelCase_ : List[Any]=224 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : str=16 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : int=8 , UpperCAmelCase_ : Any=2_048 , UpperCAmelCase_ : List[Any]=0.75 , UpperCAmelCase_ : Tuple=False , **UpperCAmelCase_ : str , ):
super().__init__(**UpperCAmelCase_)
UpperCamelCase__ : Tuple = hidden_size
UpperCamelCase__ : Optional[int] = num_hidden_layers
UpperCamelCase__ : Optional[Any] = num_attention_heads
UpperCamelCase__ : List[str] = intermediate_size
UpperCamelCase__ : List[str] = hidden_act
UpperCamelCase__ : int = hidden_dropout_prob
UpperCamelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCamelCase__ : int = initializer_range
UpperCamelCase__ : Optional[Any] = layer_norm_eps
UpperCamelCase__ : Optional[int] = image_size
UpperCamelCase__ : List[Any] = patch_size
UpperCamelCase__ : str = num_channels
UpperCamelCase__ : Dict = qkv_bias
UpperCamelCase__ : Tuple = decoder_num_attention_heads
UpperCamelCase__ : int = decoder_hidden_size
UpperCamelCase__ : Any = decoder_num_hidden_layers
UpperCamelCase__ : Optional[int] = decoder_intermediate_size
UpperCamelCase__ : List[str] = mask_ratio
UpperCamelCase__ : Dict = norm_pix_loss
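# Hedged usage sketch (ViTMAEConfig is the name used by the tests elsewhere in
# this dump):
#   config = ViTMAEConfig()
#   config.mask_ratio   # -> 0.75: three quarters of the patches are masked out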
| 6 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def __UpperCAmelCase ( lowerCamelCase_) -> float:
return np.dot(lowerCamelCase_ , lowerCamelCase_)
class __lowercase :
def __init__( self : Tuple , *,
UpperCAmelCase_ : float = np.inf , UpperCAmelCase_ : str = "linear" , UpperCAmelCase_ : float = 0.0 , ):
UpperCamelCase__ : Union[str, Any] = regularization
UpperCamelCase__ : Optional[int] = gamma
if kernel == "linear":
UpperCamelCase__ : List[str] = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma')
if not isinstance(self.gamma , (float, int)):
raise ValueError('gamma must be float or int')
if not self.gamma > 0:
raise ValueError('gamma must be > 0')
UpperCamelCase__ : Union[str, Any] = self.__rbf
# in the future, there could be a default value like in sklearn
# sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
UpperCamelCase__ : Optional[int] = F'Unknown kernel: {kernel}'
raise ValueError(UpperCAmelCase_)
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray):
return np.dot(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray):
return np.exp(-(self.gamma * norm_squared(vectora - vectora)))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : list[ndarray] , UpperCAmelCase_ : ndarray):
UpperCamelCase__ : Any = observations
UpperCamelCase__ : Tuple = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((UpperCamelCase__), ) : Optional[Any] = np.shape(UpperCAmelCase_)
def to_minimize(UpperCAmelCase_ : ndarray) -> float:
UpperCamelCase__ : Union[str, Any] = 0
((UpperCamelCase__), ) : int = np.shape(UpperCAmelCase_)
for i in range(UpperCAmelCase_):
for j in range(UpperCAmelCase_):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j])
)
return 1 / 2 * s - sum(UpperCAmelCase_)
UpperCamelCase__ : List[str] = LinearConstraint(UpperCAmelCase_ , 0 , 0)
UpperCamelCase__ : Dict = Bounds(0 , self.regularization)
UpperCamelCase__ : Any = minimize(
UpperCAmelCase_ , np.ones(UpperCAmelCase_) , bounds=UpperCAmelCase_ , constraints=[ly_contraint]).x
UpperCamelCase__ : str = l_star
# calculating mean offset of separation plane to points
UpperCamelCase__ : Any = 0
for i in range(UpperCAmelCase_):
for j in range(UpperCAmelCase_):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j])
UpperCamelCase__ : List[str] = s / n
def __UpperCamelCase ( self : str , UpperCAmelCase_ : ndarray):
UpperCamelCase__ : Optional[int] = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , UpperCAmelCase_)
for n in range(len(self.classes)))
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
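# Standalone illustration of the RBF kernel wired up above:
# k(x, y) = exp(-gamma * ||x - y||^2), so identical vectors score exactly 1
# and the score decays towards 0 as the vectors move apart.
import numpy as _np

def _rbf(x: _np.ndarray, y: _np.ndarray, gamma: float = 0.5) -> float:
    diff = x - y
    return float(_np.exp(-gamma * _np.dot(diff, diff)))

assert _rbf(_np.array([1.0, 2.0]), _np.array([1.0, 2.0])) == 1.0
assert 0.0 < _rbf(_np.array([0.0]), _np.array([2.0])) < 1.0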
| 6 | 1 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> list[tuple[int, int]]:
UpperCamelCase__, UpperCamelCase__ : Optional[int] = position
UpperCamelCase__ : Tuple = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
UpperCamelCase__ : Any = []
for position in positions:
UpperCamelCase__, UpperCamelCase__ : Any = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(lowerCamelCase_)
return permissible_positions
def __UpperCAmelCase ( lowerCamelCase_) -> bool:
return not any(elem == 0 for row in board for elem in row)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> bool:
if is_complete(lowerCamelCase_):
return True
for position in get_valid_pos(lowerCamelCase_ , len(lowerCamelCase_)):
UpperCamelCase__, UpperCamelCase__ : str = position
if board[y][x] == 0:
UpperCamelCase__ : Optional[Any] = curr + 1
if open_knight_tour_helper(lowerCamelCase_ , lowerCamelCase_ , curr + 1):
return True
UpperCamelCase__ : List[str] = 0
return False
def __UpperCAmelCase ( lowerCamelCase_) -> list[list[int]]:
UpperCamelCase__ : List[str] = [[0 for i in range(lowerCamelCase_)] for j in range(lowerCamelCase_)]
for i in range(lowerCamelCase_):
for j in range(lowerCamelCase_):
UpperCamelCase__ : Optional[int] = 1
if open_knight_tour_helper(lowerCamelCase_ , (i, j) , 1):
return board
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Dict = f'Open Knight Tour cannot be performed on a board of size {n}'
raise ValueError(lowerCamelCase_)
if __name__ == "__main__":
import doctest
doctest.testmod()
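# Hedged usage sketch ("open_knight_tour" is an assumed name for the mangled
# driver above): a 1x1 board is trivially solvable, and a returned n x n board
# numbers the visited squares 1..n*n in tour order.
#   open_knight_tour(1)   # -> [[1]]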
| 6 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
UpperCamelCase__ : Dict = DPTConfig()
if "large" in checkpoint_url:
UpperCamelCase__ : List[str] = 1_024
UpperCamelCase__ : List[str] = 4_096
UpperCamelCase__ : Optional[int] = 24
UpperCamelCase__ : List[str] = 16
UpperCamelCase__ : List[str] = [5, 11, 17, 23]
UpperCamelCase__ : str = [256, 512, 1_024, 1_024]
UpperCamelCase__ : Union[str, Any] = (1, 384, 384)
if "ade" in checkpoint_url:
UpperCamelCase__ : int = True
UpperCamelCase__ : Optional[Any] = 150
UpperCamelCase__ : int = 'huggingface/label-files'
UpperCamelCase__ : List[Any] = 'ade20k-id2label.json'
UpperCamelCase__ : List[Any] = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset')) , 'r'))
UpperCamelCase__ : int = {int(lowerCamelCase_): v for k, v in idalabel.items()}
UpperCamelCase__ : Union[str, Any] = idalabel
UpperCamelCase__ : List[str] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ : Any = [1, 150, 480, 480]
return config, expected_shape
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
UpperCamelCase__ : Tuple = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.model' , 'dpt.encoder')
if "pretrained.model" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.model' , 'dpt.embeddings')
if "patch_embed" in name:
UpperCamelCase__ : Tuple = name.replace('patch_embed' , 'patch_embeddings')
if "pos_embed" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pos_embed' , 'position_embeddings')
if "attn.proj" in name:
UpperCamelCase__ : List[Any] = name.replace('attn.proj' , 'attention.output.dense')
if "proj" in name and "project" not in name:
UpperCamelCase__ : Optional[Any] = name.replace('proj' , 'projection')
if "blocks" in name:
UpperCamelCase__ : int = name.replace('blocks' , 'layer')
if "mlp.fc1" in name:
UpperCamelCase__ : int = name.replace('mlp.fc1' , 'intermediate.dense')
if "mlp.fc2" in name:
UpperCamelCase__ : Tuple = name.replace('mlp.fc2' , 'output.dense')
if "norm1" in name:
UpperCamelCase__ : List[Any] = name.replace('norm1' , 'layernorm_before')
if "norm2" in name:
UpperCamelCase__ : int = name.replace('norm2' , 'layernorm_after')
if "scratch.output_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('scratch.output_conv' , 'head')
if "scratch" in name:
UpperCamelCase__ : int = name.replace('scratch' , 'neck')
if "layer1_rn" in name:
UpperCamelCase__ : Optional[Any] = name.replace('layer1_rn' , 'convs.0')
if "layer2_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer2_rn' , 'convs.1')
if "layer3_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer3_rn' , 'convs.2')
if "layer4_rn" in name:
UpperCamelCase__ : List[str] = name.replace('layer4_rn' , 'convs.3')
if "refinenet" in name:
UpperCamelCase__ : int = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCamelCase__ : Any = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}')
if "out_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('out_conv' , 'projection')
if "resConfUnit1" in name:
UpperCamelCase__ : int = name.replace('resConfUnit1' , 'residual_layer1')
if "resConfUnit2" in name:
UpperCamelCase__ : Optional[Any] = name.replace('resConfUnit2' , 'residual_layer2')
if "conv1" in name:
UpperCamelCase__ : Optional[Any] = name.replace('conv1' , 'convolution1')
if "conv2" in name:
UpperCamelCase__ : int = name.replace('conv2' , 'convolution2')
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0')
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0')
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0')
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0')
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection')
if "pretrained.act_postprocess1.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize')
if "pretrained.act_postprocess2.3" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection')
if "pretrained.act_postprocess2.4" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize')
if "pretrained.act_postprocess3.3" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection')
if "pretrained.act_postprocess4.3" in name:
UpperCamelCase__ : List[Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection')
if "pretrained.act_postprocess4.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize')
if "pretrained" in name:
UpperCamelCase__ : List[str] = name.replace('pretrained' , 'dpt')
if "bn" in name:
UpperCamelCase__ : Tuple = name.replace('bn' , 'batch_norm')
if "head" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('head' , 'head.head')
if "encoder.norm" in name:
UpperCamelCase__ : int = name.replace('encoder.norm' , 'layernorm')
if "auxlayer" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('auxlayer' , 'auxiliary_head.head')
return name
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any:
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ : Optional[int] = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
UpperCamelCase__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ : List[str] = in_proj_weight[: config.hidden_size, :]
UpperCamelCase__ : List[Any] = in_proj_bias[: config.hidden_size]
UpperCamelCase__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ : int = in_proj_bias[-config.hidden_size :]
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ : List[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw)
return im
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase__, UpperCamelCase__ : Any = get_dpt_config(lowerCamelCase_)
# load original state_dict from URL
UpperCamelCase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu')
# remove certain keys
remove_ignore_keys_(lowerCamelCase_)
# rename keys
for key in state_dict.copy().keys():
UpperCamelCase__ : str = state_dict.pop(lowerCamelCase_)
UpperCamelCase__ : List[str] = val
# read in qkv matrices
read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_)
# load HuggingFace model
UpperCamelCase__ : str = DPTForSemanticSegmentation(lowerCamelCase_) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowerCamelCase_)
model.load_state_dict(lowerCamelCase_)
model.eval()
# Check outputs on an image
UpperCamelCase__ : Any = 480 if 'ade' in checkpoint_url else 384
UpperCamelCase__ : List[Any] = DPTImageProcessor(size=lowerCamelCase_)
UpperCamelCase__ : int = prepare_img()
UpperCamelCase__ : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors='pt')
# forward pass
UpperCamelCase__ : Any = model(**lowerCamelCase_).logits if 'ade' in checkpoint_url else model(**lowerCamelCase_).predicted_depth
# Assert logits
UpperCamelCase__ : Tuple = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]])
if "ade" in checkpoint_url:
UpperCamelCase__ : List[str] = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]])
assert outputs.shape == torch.Size(lowerCamelCase_)
assert (
torch.allclose(outputs[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , lowerCamelCase_)
)
Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_)
print(f'Saving model to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase_)
print(f'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase_)
if push_to_hub:
print('Pushing model to hub...')
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
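# Example invocation (script name illustrative; the checkpoint URL is the
# default defined above):
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large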
| 6 | 1 |
'''simple docstring'''
import argparse
import struct
import unittest
class __lowercase :
def __init__( self : Tuple , UpperCAmelCase_ : bytes):
UpperCamelCase__ : Dict = data
# Initialize hash values
UpperCamelCase__ : Any = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
UpperCamelCase__ : List[Any] = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
UpperCamelCase__ : Tuple = self.preprocessing(self.data)
self.final_hash()
@staticmethod
def __UpperCamelCase ( UpperCAmelCase_ : bytes):
UpperCamelCase__ : List[Any] = B'\x80' + (B'\x00' * (63 - (len(UpperCAmelCase_) + 8) % 64))
UpperCamelCase__ : List[Any] = struct.pack('>Q' , (len(UpperCAmelCase_) * 8))
return data + padding + big_endian_integer
def __UpperCamelCase ( self : Union[str, Any]):
# Convert into blocks of 64 bytes
UpperCamelCase__ : int = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data) , 64)
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCamelCase__ : Tuple = list(struct.unpack('>16L' , UpperCAmelCase_))
# extend the message schedule with 48 zero-initialized words
words += [0] * 48
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = self.hashes
for index in range(0 , 64):
if index > 15:
# fill in the zero-initialized words at the end of the schedule
UpperCamelCase__ : Dict = (
self.ror(words[index - 15] , 7)
^ self.ror(words[index - 15] , 18)
^ (words[index - 15] >> 3)
)
UpperCamelCase__ : Tuple = (
self.ror(words[index - 2] , 17)
^ self.ror(words[index - 2] , 19)
^ (words[index - 2] >> 10)
)
UpperCamelCase__ : int = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X100_000_000
# Compression
UpperCamelCase__ : Optional[Any] = self.ror(UpperCAmelCase_ , 6) ^ self.ror(UpperCAmelCase_ , 11) ^ self.ror(UpperCAmelCase_ , 25)
UpperCamelCase__ : List[str] = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g)
UpperCamelCase__ : List[Any] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X100_000_000
UpperCamelCase__ : List[str] = self.ror(UpperCAmelCase_ , 2) ^ self.ror(UpperCAmelCase_ , 13) ^ self.ror(UpperCAmelCase_ , 22)
UpperCamelCase__ : Dict = (a & b) ^ (a & c) ^ (b & c)
UpperCamelCase__ : List[str] = (sa + maj) % 0X100_000_000
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = (
g,
f,
e,
((d + tempa) % 0X100_000_000),
c,
b,
a,
((tempa + tempa) % 0X100_000_000),
)
UpperCamelCase__ : List[Any] = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCamelCase__ : Optional[Any] = [
((element + mutated_hash_values[index]) % 0X100_000_000)
for index, element in enumerate(self.hashes)
]
UpperCamelCase__ : Any = ''.join([hex(UpperCAmelCase_)[2:].zfill(8) for value in self.hashes])
def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : int):
import hashlib
UpperCamelCase__ : str = bytes('Test String' , 'utf-8')
self.assertEqual(SHAaaa(UpperCAmelCase_).hash , hashlib.shaaaa(UpperCAmelCase_).hexdigest())
def __UpperCAmelCase ( ) -> None:
import doctest
doctest.testmod()
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file')
UpperCamelCase__ : List[str] = parser.parse_args()
UpperCamelCase__ : str = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb') as f:
UpperCamelCase__ : Any = f.read()
else:
UpperCamelCase__ : List[Any] = bytes(lowerCamelCase_ , 'utf-8')
print(SHAaaa(lowerCamelCase_).hash)
if __name__ == "__main__":
main()
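# Standalone cross-check mirroring the unit test above: every SHA-256 digest is
# 32 bytes, i.e. 64 hex characters.
import hashlib as _hashlib

assert len(_hashlib.sha256(b"Test String").hexdigest()) == 64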
| 6 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Any=0.6 , UpperCAmelCase_ : Dict=None , ):
UpperCamelCase__ : Tuple = parent
UpperCamelCase__ : List[str] = batch_size
UpperCamelCase__ : Optional[Any] = image_size
UpperCamelCase__ : Optional[Any] = patch_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : Union[str, Any] = is_training
UpperCamelCase__ : int = use_labels
UpperCamelCase__ : Optional[int] = hidden_size
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : str = intermediate_size
UpperCamelCase__ : Union[str, Any] = hidden_act
UpperCamelCase__ : Optional[int] = hidden_dropout_prob
UpperCamelCase__ : Tuple = attention_probs_dropout_prob
UpperCamelCase__ : Any = type_sequence_label_size
UpperCamelCase__ : int = initializer_range
UpperCamelCase__ : Optional[int] = mask_ratio
UpperCamelCase__ : int = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ : str = (image_size // patch_size) ** 2
UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any]):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Dict = ViTMAEModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Dict = model(UpperCAmelCase_)
UpperCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = config_and_inputs
UpperCamelCase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_lowerCamelCase = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = ViTMAEModelTester(self)
UpperCamelCase__ : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def __UpperCamelCase ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def __UpperCamelCase ( self : Tuple):
pass
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCamelCase__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__, UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(UpperCAmelCase_)
UpperCamelCase__ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Any = [*signature.parameters.keys()]
UpperCamelCase__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]):
# make masks reproducible
np.random.seed(2)
UpperCamelCase__ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
UpperCamelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
UpperCamelCase__ : Optional[Any] = torch.from_numpy(UpperCAmelCase_)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ : List[str] = pt_noise
super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__, UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
UpperCamelCase__ : Dict = outputs[0].cpu().numpy()
UpperCamelCase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = model_class.from_pretrained(UpperCAmelCase_)
model.to(UpperCAmelCase_)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
# Make sure we don't have nans
UpperCamelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCamelCase__ : Any = 0
UpperCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(UpperCAmelCase_ , 1e-5)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Optional[int]):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : Optional[int]):
pass
@slow
def __UpperCamelCase ( self : Optional[Any]):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : int):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
def __UpperCamelCase ( self : str):
# make random mask reproducible across the PT and TF model
np.random.seed(2)
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.default_image_processor
UpperCamelCase__ : Dict = prepare_img()
UpperCamelCase__ : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ : Union[str, Any] = ViTMAEConfig()
UpperCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
UpperCamelCase__ : Any = np.random.uniform(size=(1, num_patches))
# forward pass
with torch.no_grad():
UpperCamelCase__ : Dict = model(**UpperCAmelCase_ , noise=torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_))
# verify the logits
UpperCamelCase__ : Tuple = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Any = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase_) , atol=1e-4))
| 6 | 1 |
'''simple docstring'''
from torch import nn
class __lowercase (nn.Module ):
def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]):
super().__init__()
UpperCamelCase__ : Dict = class_size
UpperCamelCase__ : Tuple = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
UpperCamelCase__ : Dict = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : Union[str, Any]):
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
UpperCamelCase__ : int = self.mlp(UpperCAmelCase_)
return logits
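# Equivalent standalone check (names above are mangled; the head boils down to
# a single linear projection from embed_size to class_size):
import torch as _torch
from torch import nn as _nn

_head = _nn.Linear(768, 5)   # embed_size=768 -> class_size=5, chosen for illustration
assert _head(_torch.randn(1, 768)).shape == (1, 5)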
| 6 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=__lowerCamelCase ):
_lowerCamelCase = ['''torch''', '''scipy''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int):
requires_backends(self , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
requires_backends(cls , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
requires_backends(cls , ['torch', 'scipy'])
| 6 | 1 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple:
assert isinstance(lowerCamelCase_ , lowerCamelCase_)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True])
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int:
UpperCamelCase__ : List[str] = tmp_path / 'cache'
UpperCamelCase__ : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ : Union[str, Any] = JsonDatasetReader(lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_).read()
_check_json_dataset(lowerCamelCase_ , lowerCamelCase_)
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int:
UpperCamelCase__ : Optional[int] = tmp_path / 'cache'
UpperCamelCase__ : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase__ : Optional[int] = features.copy() if features else default_expected_features
UpperCamelCase__ : Tuple = (
Features({feature: Value(lowerCamelCase_) for feature, dtype in features.items()}) if features is not None else None
)
UpperCamelCase__ : Dict = JsonDatasetReader(lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_).read()
_check_json_dataset(lowerCamelCase_ , lowerCamelCase_)
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase__ : int = tmp_path / 'cache'
UpperCamelCase__ : str = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
UpperCamelCase__ : Any = features.copy() if features else default_expected_features
UpperCamelCase__ : Optional[int] = (
Features({feature: Value(lowerCamelCase_) for feature, dtype in features.items()}) if features is not None else None
)
UpperCamelCase__ : Dict = JsonDatasetReader(lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_).read()
assert isinstance(lowerCamelCase_ , lowerCamelCase_)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> int:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
UpperCamelCase__ : Optional[Any] = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
UpperCamelCase__ : Union[str, Any] = features.copy()
UpperCamelCase__ : Union[str, Any] = (
Features({feature: Value(lowerCamelCase_) for feature, dtype in features.items()}) if features is not None else None
)
UpperCamelCase__ : str = tmp_path / 'cache'
UpperCamelCase__ : Optional[int] = JsonDatasetReader(lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_).read()
assert isinstance(lowerCamelCase_ , lowerCamelCase_)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train'), 'train', 'test'])
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Any:
UpperCamelCase__ : int = tmp_path / 'cache'
UpperCamelCase__ : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase__ : Optional[Any] = JsonDatasetReader(lowerCamelCase_ , cache_dir=lowerCamelCase_ , split=lowerCamelCase_).read()
_check_json_dataset(lowerCamelCase_ , lowerCamelCase_)
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list])
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Any:
if issubclass(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase__ : Optional[Any] = jsonl_path
elif issubclass(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase__ : Tuple = [jsonl_path]
UpperCamelCase__ : Tuple = tmp_path / 'cache'
UpperCamelCase__ : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase__ : Optional[Any] = JsonDatasetReader(lowerCamelCase_ , cache_dir=lowerCamelCase_).read()
_check_json_dataset(lowerCamelCase_ , lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=("train",)) -> Optional[int]:
assert isinstance(lowerCamelCase_ , lowerCamelCase_)
for split in splits:
UpperCamelCase__ : Tuple = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True])
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int:
UpperCamelCase__ : List[Any] = tmp_path / 'cache'
UpperCamelCase__ : Union[str, Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ : Optional[Any] = JsonDatasetReader({'train': jsonl_path} , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_).read()
_check_json_datasetdict(lowerCamelCase_ , lowerCamelCase_)
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase__ : List[str] = tmp_path / 'cache'
UpperCamelCase__ : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase__ : Optional[Any] = features.copy() if features else default_expected_features
UpperCamelCase__ : List[str] = (
Features({feature: Value(lowerCamelCase_) for feature, dtype in features.items()}) if features is not None else None
)
UpperCamelCase__ : str = JsonDatasetReader({'train': jsonl_path} , features=lowerCamelCase_ , cache_dir=lowerCamelCase_).read()
_check_json_datasetdict(lowerCamelCase_ , lowerCamelCase_)
@pytest.mark.parametrize('split' , [None, NamedSplit('train'), 'train', 'test'])
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int:
if split:
UpperCamelCase__ : Tuple = {split: jsonl_path}
else:
UpperCamelCase__ : int = 'train'
UpperCamelCase__ : List[str] = {'train': jsonl_path, 'test': jsonl_path}
UpperCamelCase__ : Tuple = tmp_path / 'cache'
UpperCamelCase__ : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCamelCase__ : int = JsonDatasetReader(lowerCamelCase_ , cache_dir=lowerCamelCase_).read()
_check_json_datasetdict(lowerCamelCase_ , lowerCamelCase_ , splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def __UpperCAmelCase ( lowerCamelCase_) -> List[Any]:
return json.load(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[int]:
return [json.loads(lowerCamelCase_) for line in buffer]
class __lowercase :
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)])
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any]):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase_ , UpperCAmelCase_ , lines=UpperCAmelCase_).write()
buffer.seek(0)
UpperCamelCase__ : Optional[Any] = load_json_function(UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert isinstance(exported_content[0] , UpperCAmelCase_)
assert len(UpperCAmelCase_) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789'), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase_ , UpperCAmelCase_ , lines=UpperCAmelCase_ , orient=UpperCAmelCase_).write()
buffer.seek(0)
UpperCamelCase__ : Optional[int] = load_json(UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCAmelCase_ , 'keys') and not hasattr(exported_content[0] , 'keys')
if len_at:
assert len(exported_content[len_at]) == 10
else:
assert len(UpperCAmelCase_) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)])
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase_ , UpperCAmelCase_ , lines=UpperCAmelCase_ , num_proc=2).write()
buffer.seek(0)
UpperCamelCase__ : str = load_json_function(UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert isinstance(exported_content[0] , UpperCAmelCase_)
assert len(UpperCAmelCase_) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789'), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase_ , UpperCAmelCase_ , lines=UpperCAmelCase_ , orient=UpperCAmelCase_ , num_proc=2).write()
buffer.seek(0)
UpperCamelCase__ : Union[str, Any] = load_json(UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCAmelCase_ , 'keys') and not hasattr(exported_content[0] , 'keys')
if len_at:
assert len(exported_content[len_at]) == 10
else:
assert len(UpperCAmelCase_) == 10
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[int]):
with pytest.raises(UpperCAmelCase_):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCAmelCase_ , UpperCAmelCase_ , num_proc=0)
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')])
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any):
UpperCamelCase__ : int = tmp_path_factory.mktemp('data') / F'test.json.{extension}'
UpperCamelCase__ : Dict = str(shared_datadir / F'test_file.json.{extension}')
JsonDatasetWriter(UpperCAmelCase_ , UpperCAmelCase_ , compression=UpperCAmelCase_).write()
with fsspec.open(UpperCAmelCase_ , 'rb' , compression='infer') as f:
UpperCamelCase__ : Optional[int] = f.read()
with fsspec.open(UpperCAmelCase_ , 'rb' , compression='infer') as f:
UpperCamelCase__ : int = f.read()
assert exported_content == original_content
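# Hedged round-trip sketch mirroring the tests above (toy data, in-memory buffer):
#   from datasets import Dataset
#   import io, json
#   ds = Dataset.from_dict({'col_1': ['a', 'b'], 'col_2': [1, 2], 'col_3': [1.0, 2.0]})
#   buf = io.BytesIO()
#   JsonDatasetWriter(ds, buf, lines=True).write()
#   buf.seek(0)
#   rows = [json.loads(line) for line in buf]   # one dict per dataset row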
| 6 |
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False):
# Mapping from the first character of the prefix of the node
UpperCamelCase__ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
UpperCamelCase__ : List[Any] = is_leaf
UpperCamelCase__ : Optional[Any] = prefix
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = 0
for q, w in zip(self.prefix , UpperCAmelCase_):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : list[str]):
for word in words:
self.insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : str):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
UpperCamelCase__ : Optional[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
UpperCamelCase__ : Optional[Any] = RadixNode(prefix=UpperCAmelCase_ , is_leaf=UpperCAmelCase_)
else:
UpperCamelCase__ : int = self.nodes[word[0]]
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[Any] = incoming_node.match(
UpperCAmelCase_)
# Case 3: The node prefix is equal to the matching
# Solution: We insert the remaining word into the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
# Case 4: The node's prefix extends beyond the matching part
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
UpperCamelCase__ : Tuple = remaining_prefix
UpperCamelCase__ : str = self.nodes[matching_string[0]]
UpperCamelCase__ : Optional[Any] = RadixNode(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : str = aux_node
if remaining_word == "":
UpperCamelCase__ : int = True
else:
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = incoming_node.match(
UpperCAmelCase_)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(UpperCAmelCase_)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = incoming_node.match(
UpperCAmelCase_)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(UpperCAmelCase_)
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes) == 1 and not self.is_leaf:
UpperCamelCase__ : List[str] = list(self.nodes.values())[0]
UpperCamelCase__ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
UpperCamelCase__ : Tuple = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes) > 1:
UpperCamelCase__ : str = False
# If there is 1 edge, we merge it with its child
else:
UpperCamelCase__ : List[Any] = list(incoming_node.nodes.values())[0]
UpperCamelCase__ : Optional[Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
UpperCamelCase__ : Union[str, Any] = merging_node.nodes
return True
def __UpperCamelCase ( self : str , UpperCAmelCase_ : int = 0):
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '')
for value in self.nodes.values():
value.print_tree(height + 1)
def __UpperCAmelCase ( ) -> bool:
UpperCamelCase__ : Union[str, Any] = 'banana bananas bandana band apple all beast'.split()
UpperCamelCase__ : List[Any] = RadixNode()
root.insert_many(lowerCamelCase_)
assert all(root.find(lowerCamelCase_) for word in words)
assert not root.find('bandanas')
assert not root.find('apps')
root.delete('all')
assert not root.find('all')
root.delete('banana')
assert not root.find('banana')
assert root.find('bananas')
return True
def __UpperCAmelCase ( ) -> None:
assert test_trie()
def __UpperCAmelCase ( ) -> None:
UpperCamelCase__ : List[Any] = RadixNode()
UpperCamelCase__ : List[str] = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(lowerCamelCase_)
print('Words:' , lowerCamelCase_)
print('Tree:')
root.print_tree()
if __name__ == "__main__":
main()
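# Worked example of the prefix split that `match` computes (an independent
# re-computation, not using the class above): inserting 'band' under a node
# whose prefix is 'banana' shares 'ban', so Case 4 splits the node into an
# intermediate 'ban' with children 'ana' (the old node) and 'd' (the new word).
prefix_, word_ = 'banana', 'band'
split_at = next((k for k, (q, w) in enumerate(zip(prefix_, word_)) if q != w), min(len(prefix_), len(word_)))
assert (prefix_[:split_at], prefix_[split_at:], word_[split_at:]) == ('ban', 'ana', 'd')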
| 6 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> list[str]:
return [sentence[i : i + ngram_size] for i in range(len(lowerCamelCase_) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
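# Worked example: with ngram_size=2 the comprehension above slides a
# two-character window over the string, e.g. 'abcd' -> ['ab', 'bc', 'cd'].
assert ['abcd'[i : i + 2] for i in range(len('abcd') - 2 + 1)] == ['ab', 'bc', 'cd']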
| 6 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase__ = False
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : int):
return 12
@property
def __UpperCamelCase ( self : Tuple):
return 12
@property
def __UpperCamelCase ( self : Dict):
return 32
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self : List[str]):
torch.manual_seed(0)
UpperCamelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Dict = 12
UpperCamelCase__ : Union[str, Any] = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_)
return model
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : List[str] = self.dummy_vqvae
UpperCamelCase__ : List[str] = self.dummy_text_encoder
UpperCamelCase__ : Optional[int] = self.dummy_tokenizer
UpperCamelCase__ : List[str] = self.dummy_transformer
UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_)
UpperCamelCase__ : int = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[int] = 'cpu'
UpperCamelCase__ : str = self.dummy_vqvae
UpperCamelCase__ : Any = self.dummy_text_encoder
UpperCamelCase__ : List[Any] = self.dummy_tokenizer
UpperCamelCase__ : Dict = self.dummy_transformer
UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
UpperCamelCase__ : str = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : str = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : int = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_)
pipeline.set_progress_bar_config(disable=UpperCAmelCase_)
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : int = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , )
UpperCamelCase__ : int = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image).max() < 2.0
| 6 | 1 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __lowercase :
def __UpperCamelCase ( self : Union[str, Any]):
torch.manual_seed(0)
UpperCamelCase__ : Dict = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : List[str] = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Dict):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : str = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[str] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Dict = self.get_dummy_components()
UpperCamelCase__ : List[Any] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = inputs['prompt']
UpperCamelCase__ : List[Any] = inputs['generator']
UpperCamelCase__ : Tuple = inputs['num_inference_steps']
UpperCamelCase__ : List[Any] = inputs['output_type']
if "image" in inputs:
UpperCamelCase__ : Tuple = inputs['image']
else:
UpperCamelCase__ : Union[str, Any] = None
if "mask_image" in inputs:
UpperCamelCase__ : Optional[int] = inputs['mask_image']
else:
UpperCamelCase__ : int = None
if "original_image" in inputs:
UpperCamelCase__ : List[Any] = inputs['original_image']
else:
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__, UpperCamelCase__ : Any = pipe.encode_prompt(UpperCAmelCase_)
# inputs with prompt converted to embeddings
UpperCamelCase__ : List[Any] = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
UpperCamelCase__ : Dict = image
if mask_image is not None:
UpperCamelCase__ : Optional[int] = mask_image
if original_image is not None:
UpperCamelCase__ : Union[str, Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : int = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase_ , UpperCAmelCase_) is None , F'`{optional_component}` did not stay set to None after loading.' , )
UpperCamelCase__ : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = inputs['generator']
UpperCamelCase__ : List[Any] = inputs['num_inference_steps']
UpperCamelCase__ : Optional[int] = inputs['output_type']
# inputs with prompt converted to embeddings
UpperCamelCase__ : Any = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
UpperCamelCase__ : Tuple = image
if mask_image is not None:
UpperCamelCase__ : Union[str, Any] = mask_image
if original_image is not None:
UpperCamelCase__ : str = original_image
UpperCamelCase__ : Union[str, Any] = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Dict = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Any = self.get_dummy_components()
UpperCamelCase__ : List[str] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Any = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
UpperCamelCase__ : Any = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Tuple = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Optional[int] = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
| 6 |
'''simple docstring'''
import numpy as np
from PIL import Image
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : List[Any] = np.array(lowerCamelCase_)
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix')
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : int = 0
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : str = 0
# compute the shape of the output matrix
UpperCamelCase__ : int = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
UpperCamelCase__ : Dict = np.zeros((maxpool_shape, maxpool_shape))
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
UpperCamelCase__ : Dict = np.max(arr[i : i + size, j : j + size])
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[int] = 0
return updated_arr
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : Tuple = np.array(lowerCamelCase_)
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix')
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : int = 0
UpperCamelCase__ : List[str] = 0
UpperCamelCase__ : List[Any] = 0
# compute the shape of the output matrix
UpperCamelCase__ : str = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
UpperCamelCase__ : Union[str, Any] = np.zeros((avgpool_shape, avgpool_shape))
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
UpperCamelCase__ : List[Any] = int(np.average(arr[i : i + size, j : j + size]))
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCamelCase__ : Union[str, Any] = 0
UpperCamelCase__ : Optional[Any] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
lowerCAmelCase__ = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
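# Worked example (an independent re-computation, not calling the helpers
# above): 2x2 pooling with stride 2 on a 4x4 ramp keeps the max (or the
# truncated mean, matching the int(np.average(...)) used above) per window.
demo = np.arange(1, 17).reshape(4, 4)
assert [[int(demo[i : i + 2, j : j + 2].max()) for j in (0, 2)] for i in (0, 2)] == [[6, 8], [14, 16]]
assert [[int(demo[i : i + 2, j : j + 2].mean()) for j in (0, 2)] for i in (0, 2)] == [[3, 5], [11, 13]]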
| 6 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''unispeech'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=32 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Dict=3_072 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=1e-5 , UpperCAmelCase_ : Any="group" , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase_ : int=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : List[str]=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[Any]=128 , UpperCAmelCase_ : List[Any]=16 , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=0.05 , UpperCAmelCase_ : Optional[Any]=10 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Tuple=10 , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : Tuple=320 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Tuple=100 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Dict=256 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : int="mean" , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Tuple=256 , UpperCAmelCase_ : List[Any]=80 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : List[Any]=0.5 , **UpperCAmelCase_ : Dict , ):
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
UpperCamelCase__ : str = hidden_size
UpperCamelCase__ : Tuple = feat_extract_norm
UpperCamelCase__ : Any = feat_extract_activation
UpperCamelCase__ : Any = list(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = list(UpperCAmelCase_)
UpperCamelCase__ : int = list(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = conv_bias
UpperCamelCase__ : Union[str, Any] = num_conv_pos_embeddings
UpperCamelCase__ : str = num_conv_pos_embedding_groups
UpperCamelCase__ : Union[str, Any] = len(self.conv_dim)
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : Union[str, Any] = intermediate_size
UpperCamelCase__ : int = hidden_act
UpperCamelCase__ : List[str] = num_attention_heads
UpperCamelCase__ : str = hidden_dropout
UpperCamelCase__ : Optional[int] = attention_dropout
UpperCamelCase__ : List[str] = activation_dropout
UpperCamelCase__ : Any = feat_proj_dropout
UpperCamelCase__ : Any = final_dropout
UpperCamelCase__ : Union[str, Any] = layerdrop
UpperCamelCase__ : Tuple = layer_norm_eps
UpperCamelCase__ : Union[str, Any] = initializer_range
UpperCamelCase__ : Tuple = num_ctc_classes
UpperCamelCase__ : List[str] = vocab_size
UpperCamelCase__ : Optional[int] = do_stable_layer_norm
UpperCamelCase__ : List[Any] = use_weighted_layer_sum
UpperCamelCase__ : int = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase__ : int = apply_spec_augment
UpperCamelCase__ : Tuple = mask_time_prob
UpperCamelCase__ : List[Any] = mask_time_length
UpperCamelCase__ : List[Any] = mask_time_min_masks
UpperCamelCase__ : List[Any] = mask_feature_prob
UpperCamelCase__ : Tuple = mask_feature_length
UpperCamelCase__ : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCamelCase__ : Dict = num_codevectors_per_group
UpperCamelCase__ : Tuple = num_codevector_groups
UpperCamelCase__ : List[Any] = contrastive_logits_temperature
UpperCamelCase__ : int = feat_quantizer_dropout
UpperCamelCase__ : Union[str, Any] = num_negatives
UpperCamelCase__ : List[str] = codevector_dim
UpperCamelCase__ : List[Any] = proj_codevector_dim
UpperCamelCase__ : Optional[Any] = diversity_loss_weight
# ctc loss
UpperCamelCase__ : Any = ctc_loss_reduction
UpperCamelCase__ : Optional[int] = ctc_zero_infinity
# pretraining loss
UpperCamelCase__ : List[Any] = replace_prob
@property
def __UpperCamelCase ( self : Any):
return functools.reduce(operator.mul , self.conv_stride , 1)
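# Quick check of the property above: with the default conv strides
# (5, 2, 2, 2, 2, 2, 2), one output frame of the feature extractor covers
# 5 * 2**6 = 320 input samples.
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320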
| 6 |
'''simple docstring'''
from __future__ import annotations
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]):
UpperCamelCase__ : int = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.')
if len(UpperCAmelCase_) != 0:
UpperCamelCase__ : str = len(rows[0])
if cols == 0:
raise error
for row in rows:
if len(UpperCAmelCase_) != cols:
raise error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise error
UpperCamelCase__ : Optional[int] = rows
else:
UpperCamelCase__ : Optional[Any] = []
def __UpperCamelCase ( self : Union[str, Any]):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
@property
def __UpperCamelCase ( self : Dict):
return len(self.rows)
@property
def __UpperCamelCase ( self : Tuple):
return len(self.rows[0])
@property
def __UpperCamelCase ( self : List[Any]):
return (self.num_rows, self.num_columns)
@property
def __UpperCamelCase ( self : Any):
return self.order[0] == self.order[1]
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[int] = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
for row_num in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0])
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]))
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns))
def __UpperCamelCase ( self : str):
return bool(self.determinant())
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
UpperCamelCase__ : Optional[Any] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns)
if other_column != column
]
for other_row in range(self.num_rows)
if other_row != row
]
return Matrix(UpperCAmelCase_).determinant()
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
if (row + column) % 2 == 0:
return self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
return -1 * self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
return Matrix(
[
[self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) for column in range(self.num_columns)]
for row in range(self.num_rows)
])
def __UpperCamelCase ( self : Optional[int]):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns)
]
for row in range(self.minors().num_rows)
])
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = [
[self.cofactors().rows[column][row] for column in range(self.num_columns)]
for row in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse')
return self.adjugate() * (1 / determinant)
def __repr__( self : Any):
return str(self.rows)
def __str__( self : List[Any]):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0])) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(UpperCAmelCase_) for value in row]) + '.]'
for row in self.rows
])
+ "]"
)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : List[str] = TypeError('Row must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix')
if position is None:
self.rows.append(UpperCAmelCase_)
else:
UpperCamelCase__ : Tuple = self.rows[0:position] + [row] + self.rows[position:]
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : int = TypeError(
'Column must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in column:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix')
if position is None:
UpperCamelCase__ : Optional[int] = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
else:
UpperCamelCase__ : str = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows)
]
def __eq__( self : List[Any] , UpperCAmelCase_ : object):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Any , UpperCAmelCase_ : object):
return not self == other
def __neg__( self : Union[str, Any]):
return self * -1
def __add__( self : Optional[int] , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __sub__( self : Tuple , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __mul__( self : Any , UpperCAmelCase_ : Matrix | int | float):
if isinstance(UpperCAmelCase_ , (int, float)):
return Matrix(
[[int(element * other) for element in row] for row in self.rows])
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second')
return Matrix(
[
[Matrix.dot_product(UpperCAmelCase_ , UpperCAmelCase_) for column in other.columns()]
for row in self.rows
])
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix')
def __pow__( self : Dict , UpperCAmelCase_ : int):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise TypeError('A Matrix can only be raised to the power of an int')
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power')
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power')
UpperCamelCase__ : str = self
for _ in range(other - 1):
result *= self
return result
@classmethod
def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int]):
return sum(row[i] * column[i] for i in range(len(UpperCAmelCase_)))
if __name__ == "__main__":
import doctest
doctest.testmod()
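# Worked example of the 2x2 determinant base case used above,
# det([[1, 2], [3, 4]]) = 1*4 - 2*3 = -2, and of the cofactor sign pattern
# (-1)**(row + column) that the minors/cofactors methods rely on.
assert 1 * 4 - 2 * 3 == -2
assert [(-1) ** (r + c) for r in (0, 1) for c in (0, 1)] == [1, -1, -1, 1]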
| 6 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> float:
if discount_rate < 0:
raise ValueError('Discount rate cannot be negative')
if not cash_flows:
raise ValueError('Cash flows list cannot be empty')
UpperCamelCase__ : int = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase_))
return round(lowerCamelCase_ , ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
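# Worked example: three flows of 100 at a 5% discount rate, with the first
# flow at i=0 and therefore undiscounted, give
# 100/1.05**0 + 100/1.05**1 + 100/1.05**2 ~= 285.94.
assert round(sum(cf / 1.05 ** i for i, cf in enumerate([100, 100, 100])), 2) == 285.94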
| 6 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
UpperCamelCase__ : Optional[Any] = len(lowerCamelCase_)
UpperCamelCase__ : Optional[Any] = sum(lowerCamelCase_)
UpperCamelCase__ : Optional[Any] = [[False for x in range(s + 1)] for y in range(n + 1)]
for i in range(n + 1):  # include i = 0 so the empty subset (sum 0) is reachable
UpperCamelCase__ : List[str] = True
for i in range(1 , s + 1):
UpperCamelCase__ : Any = False
for i in range(1 , n + 1):
for j in range(1 , s + 1):
UpperCamelCase__ : str = dp[i - 1][j]  # carry over sums reachable without item i
if arr[i - 1] <= j:
UpperCamelCase__ : Any = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2) , -1 , -1):
if dp[n][j] is True:
UpperCamelCase__ : Any = s - 2 * j
break
return diff
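# Worked example checked by brute force: for [1, 6, 11, 5] (total 23) the
# best split is {1, 5, 6} against {11}, so the minimum subset-sum
# difference is 12 - 11 = 1.
from itertools import combinations
vals = [1, 6, 11, 5]
assert min(abs(sum(vals) - 2 * sum(c)) for r in range(len(vals) + 1) for c in combinations(vals, r)) == 1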
| 6 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase__ = 3
def __UpperCAmelCase ( lowerCamelCase_) -> int:
print('Generating primitive root of p')
while True:
UpperCamelCase__ : Any = random.randrange(3 , lowerCamelCase_)
if pow(lowerCamelCase_ , 2 , lowerCamelCase_) == 1:
continue
if pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) == 1:
continue
return g
def __UpperCAmelCase ( lowerCamelCase_) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print('Generating prime p...')
UpperCamelCase__ : List[str] = rabin_miller.generate_large_prime(lowerCamelCase_) # select large prime number.
UpperCamelCase__ : Any = primitive_root(lowerCamelCase_) # one primitive root on modulo p.
UpperCamelCase__ : Union[str, Any] = random.randrange(3 , lowerCamelCase_) # private_key -> have to be greater than 2 for safety.
UpperCamelCase__ : Dict = cryptomath.find_mod_inverse(pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) , lowerCamelCase_)
UpperCamelCase__ : List[Any] = (key_size, e_a, e_a, p)
UpperCamelCase__ : Optional[Any] = (key_size, d)
return public_key, private_key
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> None:
if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
print('\nWARNING:')
print(
f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'Use a different name or delete these files and re-run this program.')
sys.exit()
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = generate_key(lowerCamelCase_)
print(f'\nWriting public key to file {name}_pubkey.txt...')
with open(f'{name}_pubkey.txt' , 'w') as fo:
fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
print(f'Writing private key to file {name}_privkey.txt...')
with open(f'{name}_privkey.txt' , 'w') as fo:
fo.write(f'{private_key[0]},{private_key[1]}')
def __UpperCAmelCase ( ) -> None:
print('Making key files...')
make_key_files('elgamal' , 2_048)
print('Key files generation successful')
if __name__ == "__main__":
main()
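# Toy sanity check of the ElGamal relations behind these keys (p=23, g=5 are
# deliberately tiny, NOT secure parameters; needs Python 3.8+ for the
# modular inverse via pow(x, -1, p)):
p_, g_, d_ = 23, 5, 6
h_ = pow(g_, d_, p_)  # public component h = g**d mod p
c1, c2 = pow(g_, 3, p_), (20 * pow(h_, 3, p_)) % p_  # encrypt m=20 with k=3
assert (c2 * pow(pow(c1, d_, p_), -1, p_)) % p_ == 20  # decryption recovers m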
| 6 | 1 |
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
lowerCAmelCase__ = NewType('DataClass', Any)
lowerCAmelCase__ = NewType('DataClassType', Any)
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
if isinstance(lowerCamelCase_ , lowerCamelCase_):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).')
def __UpperCAmelCase ( lowerCamelCase_) -> Callable[[str], Any]:
UpperCamelCase__ : Any = {str(lowerCamelCase_): choice for choice in choices}
return lambda lowerCamelCase_: str_to_choice.get(lowerCamelCase_ , lowerCamelCase_)
def __UpperCAmelCase ( *,
lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = dataclasses.MISSING , lowerCamelCase_ = dataclasses.MISSING , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> dataclasses.Field:
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
UpperCamelCase__ : int = {}
if aliases is not None:
UpperCamelCase__ : Optional[Any] = aliases
if help is not None:
UpperCamelCase__ : Optional[int] = help
return dataclasses.field(metadata=lowerCamelCase_ , default=lowerCamelCase_ , default_factory=lowerCamelCase_ , **lowerCamelCase_)
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = 42
def __init__( self : int , UpperCAmelCase_ : Union[DataClassType, Iterable[DataClassType]] , **UpperCAmelCase_ : Union[str, Any]):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
UpperCamelCase__ : List[str] = ArgumentDefaultsHelpFormatter
super().__init__(**UpperCAmelCase_)
if dataclasses.is_dataclass(UpperCAmelCase_):
UpperCamelCase__ : int = [dataclass_types]
UpperCamelCase__ : List[str] = list(UpperCAmelCase_)
for dtype in self.dataclass_types:
self._add_dataclass_arguments(UpperCAmelCase_)
@staticmethod
def __UpperCamelCase ( UpperCAmelCase_ : ArgumentParser , UpperCAmelCase_ : dataclasses.Field):
UpperCamelCase__ : int = F'--{field.name}'
UpperCamelCase__ : List[str] = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , UpperCAmelCase_):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default')
UpperCamelCase__ : str = kwargs.pop('aliases' , [])
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : Union[str, Any] = [aliases]
UpperCamelCase__ : Optional[int] = getattr(field.type , '__origin__' , field.type)
if origin_type is Union or (hasattr(UpperCAmelCase_ , 'UnionType') and isinstance(UpperCAmelCase_ , types.UnionType)):
if str not in field.type.__args__ and (
len(field.type.__args__) != 2 or type(UpperCAmelCase_) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
F' Problem encountered in field \'{field.name}\'.')
if type(UpperCAmelCase_) not in field.type.__args__:
# filter `str` in Union
UpperCamelCase__ : Any = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
UpperCamelCase__ : Dict = getattr(field.type , '__origin__' , field.type)
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
UpperCamelCase__ : List[Any] = (
field.type.__args__[0] if isinstance(UpperCAmelCase_ , field.type.__args__[1]) else field.type.__args__[1]
)
UpperCamelCase__ : List[Any] = getattr(field.type , '__origin__' , field.type)
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
UpperCamelCase__ : Any = {}
if origin_type is Literal or (isinstance(field.type , UpperCAmelCase_) and issubclass(field.type , UpperCAmelCase_)):
if origin_type is Literal:
UpperCamelCase__ : int = field.type.__args__
else:
UpperCamelCase__ : List[Any] = [x.value for x in field.type]
UpperCamelCase__ : str = make_choice_type_function(kwargs['choices'])
if field.default is not dataclasses.MISSING:
UpperCamelCase__ : Any = field.default
else:
UpperCamelCase__ : Optional[Any] = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
UpperCamelCase__ : Union[str, Any] = copy(UpperCAmelCase_)
# Hack because type=bool in argparse does not behave as we want.
UpperCamelCase__ : Tuple = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
UpperCamelCase__ : List[Any] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
UpperCamelCase__ : List[str] = default
# This tells argparse we accept 0 or 1 value after --field_name
UpperCamelCase__ : str = '?'
# This is the value that will get picked if we do --field_name (without value)
UpperCamelCase__ : Union[str, Any] = True
elif isclass(UpperCAmelCase_) and issubclass(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : Dict = field.type.__args__[0]
UpperCamelCase__ : Optional[int] = '+'
if field.default_factory is not dataclasses.MISSING:
UpperCamelCase__ : Any = field.default_factory()
elif field.default is dataclasses.MISSING:
UpperCamelCase__ : List[Any] = True
else:
UpperCamelCase__ : List[str] = field.type
if field.default is not dataclasses.MISSING:
UpperCamelCase__ : Optional[Any] = field.default
elif field.default_factory is not dataclasses.MISSING:
UpperCamelCase__ : Optional[Any] = field.default_factory()
else:
UpperCamelCase__ : Union[str, Any] = True
parser.add_argument(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
UpperCamelCase__ : Union[str, Any] = False
parser.add_argument(F'--no_{field.name}' , action='store_false' , dest=field.name , **UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : DataClassType):
if hasattr(UpperCAmelCase_ , '_argument_group_name'):
UpperCamelCase__ : Any = self.add_argument_group(dtype._argument_group_name)
else:
UpperCamelCase__ : Union[str, Any] = self
try:
UpperCamelCase__ : Dict[str, type] = get_type_hints(UpperCAmelCase_)
except NameError:
raise RuntimeError(
F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)')
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(UpperCAmelCase_):
UpperCamelCase__ : str = '.'.join(map(UpperCAmelCase_ , sys.version_info[:3]))
raise RuntimeError(
F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
'line of `from __future__ import annotations` which opts in union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
'support Python versions that lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.') from ex
raise
for field in dataclasses.fields(UpperCAmelCase_):
if not field.init:
continue
UpperCamelCase__ : int = type_hints[field.name]
self._parse_dataclass_field(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : List[str]=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
UpperCamelCase__ : Optional[int] = []
if args_filename:
args_files.append(Path(UpperCAmelCase_))
elif look_for_args_file and len(sys.argv):
args_files.append(Path(sys.argv[0]).with_suffix('.args'))
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
UpperCamelCase__ : List[str] = ArgumentParser()
args_file_parser.add_argument(UpperCAmelCase_ , type=UpperCAmelCase_ , action='append')
# Use only remaining args for further parsing (remove the args_file_flag)
UpperCamelCase__, UpperCamelCase__ : Optional[int] = args_file_parser.parse_known_args(args=UpperCAmelCase_)
UpperCamelCase__ : str = vars(UpperCAmelCase_).get(args_file_flag.lstrip('-') , UpperCAmelCase_)
if cmd_args_file_paths:
args_files.extend([Path(UpperCAmelCase_) for p in cmd_args_file_paths])
UpperCamelCase__ : str = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
UpperCamelCase__ : List[Any] = file_args + args if args is not None else file_args + sys.argv[1:]
UpperCamelCase__, UpperCamelCase__ : List[Any] = self.parse_known_args(args=UpperCAmelCase_)
UpperCamelCase__ : int = []
for dtype in self.dataclass_types:
UpperCamelCase__ : Dict = {f.name for f in dataclasses.fields(UpperCAmelCase_) if f.init}
UpperCamelCase__ : List[Any] = {k: v for k, v in vars(UpperCAmelCase_).items() if k in keys}
for k in keys:
delattr(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : List[Any] = dtype(**UpperCAmelCase_)
outputs.append(UpperCAmelCase_)
if len(namespace.__dict__) > 0:
# additional namespace.
outputs.append(UpperCAmelCase_)
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}')
return (*outputs,)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Dict[str, Any] , UpperCAmelCase_ : bool = False):
UpperCamelCase__ : List[str] = set(args.keys())
UpperCamelCase__ : List[Any] = []
for dtype in self.dataclass_types:
UpperCamelCase__ : Union[str, Any] = {f.name for f in dataclasses.fields(UpperCAmelCase_) if f.init}
UpperCamelCase__ : List[str] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys())
UpperCamelCase__ : List[str] = dtype(**UpperCAmelCase_)
outputs.append(UpperCAmelCase_)
if not allow_extra_keys and unused_keys:
raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(UpperCAmelCase_)}')
return tuple(UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False):
with open(Path(UpperCAmelCase_) , encoding='utf-8') as open_json_file:
UpperCamelCase__ : Union[str, Any] = json.loads(open_json_file.read())
UpperCamelCase__ : List[str] = self.parse_dict(UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_)
return tuple(UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False):
UpperCamelCase__ : Dict = self.parse_dict(yaml.safe_load(Path(UpperCAmelCase_).read_text()) , allow_extra_keys=UpperCAmelCase_)
return tuple(UpperCAmelCase_)
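# Illustrative sketch of the parse_dict pattern above, reduced to plain
# dataclasses; `TrainingArgs` and its fields are hypothetical stand-ins,
# not part of the original module.
import dataclasses

@dataclasses.dataclass
class TrainingArgs:
    learning_rate: float = 5e-5
    num_epochs: int = 3

raw = {'learning_rate': 1e-4, 'num_epochs': 5, 'unused_key': 0}
keys = {f.name for f in dataclasses.fields(TrainingArgs) if f.init}
inputs = {k: v for k, v in raw.items() if k in keys}  # drop keys the dataclass cannot take
parsed = TrainingArgs(**inputs)  # TrainingArgs(learning_rate=0.0001, num_epochs=5)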
| 6 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
lowerCAmelCase__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str:
for attribute in key.split('.'):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCamelCase__ : str = 'lm_head'
UpperCamelCase__ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_)
if weight_type is not None:
UpperCamelCase__ : List[Any] = getattr(lowerCamelCase_ , lowerCamelCase_).shape
else:
UpperCamelCase__ : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
UpperCamelCase__ : Optional[Any] = value
elif weight_type == "weight_g":
UpperCamelCase__ : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCamelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCamelCase__ : Any = value
else:
UpperCamelCase__ : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : int = fairseq_model.state_dict()
UpperCamelCase__ : int = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase__ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase__ : List[Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
UpperCamelCase__ : Any = True
if "*" in mapped_key:
UpperCamelCase__ : Any = name.split(lowerCamelCase_)[0].split('.')[-2]
UpperCamelCase__ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase_)
if "weight_g" in name:
UpperCamelCase__ : int = 'weight_g'
elif "weight_v" in name:
UpperCamelCase__ : Any = 'weight_v'
elif "bias" in name:
UpperCamelCase__ : Union[str, Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase__ : Any = 'weight'
else:
UpperCamelCase__ : Tuple = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
continue
if not is_used:
unused_weights.append(lowerCamelCase_)
logger.warning(f'Unused weights: {unused_weights}')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Dict = full_name.split('conv_layers.')[-1]
UpperCamelCase__ : List[Any] = name.split('.')
UpperCamelCase__ : Any = int(items[0])
UpperCamelCase__ : int = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
UpperCamelCase__ : Tuple = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
UpperCamelCase__ : int = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'
" found."
)
UpperCamelCase__ : Optional[Any] = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
)
UpperCamelCase__ : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(lowerCamelCase_)
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True) -> Tuple:
if config_path is not None:
UpperCamelCase__ : Optional[Any] = UniSpeechConfig.from_pretrained(lowerCamelCase_)
else:
UpperCamelCase__ : int = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCamelCase__ : Union[str, Any] = Dictionary.load_from_json(lowerCamelCase_)
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase__ : List[Any] = target_dict.pad_index
UpperCamelCase__ : Dict = target_dict.bos_index
UpperCamelCase__ : Union[str, Any] = target_dict.eos_index
UpperCamelCase__ : Tuple = len(target_dict.symbols)
UpperCamelCase__ : Dict = os.path.join(lowerCamelCase_ , 'vocab.json')
if not os.path.isdir(lowerCamelCase_):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_))
return
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_)
UpperCamelCase__ : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase__ : Any = 42
UpperCamelCase__ : List[str] = 43
with open(lowerCamelCase_ , 'w' , encoding='utf-8') as vocab_handle:
json.dump(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Optional[int] = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , )
UpperCamelCase__ : Optional[Any] = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
UpperCamelCase__ : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_)
processor.save_pretrained(lowerCamelCase_)
UpperCamelCase__ : Dict = UniSpeechForCTC(lowerCamelCase_)
else:
UpperCamelCase__ : List[Any] = UniSpeechForPreTraining(lowerCamelCase_)
if is_finetuned:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
UpperCamelCase__ : int = model[0].eval()
recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
hf_unispeech.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
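# Hedged usage note (script name and paths are placeholders, not from the
# original file):
# python <this_script>.py \
#     --checkpoint_path /path/to/unispeech.pt \
#     --pytorch_dump_folder_path /path/to/output_dir \
#     --dict_path /path/to/dict.ltr.txt \
#     --config_path /path/to/config.json
# Pass --not_finetuned for a pretraining checkpoint (loads UniSpeechForPreTraining);
# omit it for a fine-tuned CTC checkpoint, in which case --dict_path is used to
# build the phoneme tokenizer vocabulary.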
| 6 | 1 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowerCAmelCase__ = None
lowerCAmelCase__ = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
lowerCAmelCase__ = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class __lowercase :
_lowerCamelCase = True
_lowerCamelCase = None
# Automatically constructed
_lowerCamelCase = "PIL.Image.Image"
_lowerCamelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
_lowerCamelCase = field(default='''Image''' , init=__lowerCamelCase , repr=__lowerCamelCase )
def __call__( self : Union[str, Any]):
return self.pa_type
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.')
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : int = np.array(UpperCAmelCase_)
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
return {"path": value, "bytes": None}
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
return {"path": None, "bytes": value}
elif isinstance(UpperCAmelCase_ , np.ndarray):
# convert the image array to PNG/TIFF bytes
return encode_np_array(UpperCAmelCase_)
elif isinstance(UpperCAmelCase_ , PIL.Image.Image):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(UpperCAmelCase_)
elif value.get('path') is not None and os.path.isfile(value['path']):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path')}
elif value.get('bytes') is not None or value.get('path') is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes'), "path": value.get('path')}
else:
raise ValueError(
F'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.')
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : dict , UpperCAmelCase_ : int=None):
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.')
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.')
if token_per_repo_id is None:
UpperCamelCase__ : int = {}
UpperCamelCase__, UpperCamelCase__ : int = value['path'], value['bytes']
if bytes_ is None:
if path is None:
raise ValueError(F'An image should have one of \'path\' or \'bytes\' but both are None in {value}.')
else:
if is_local_path(UpperCAmelCase_):
UpperCamelCase__ : int = PIL.Image.open(UpperCAmelCase_)
else:
UpperCamelCase__ : str = path.split('::')[-1]
try:
UpperCamelCase__ : Union[str, Any] = string_to_dict(UpperCAmelCase_ , config.HUB_DATASETS_URL)['repo_id']
UpperCamelCase__ : Optional[Any] = token_per_repo_id.get(UpperCAmelCase_)
except ValueError:
UpperCamelCase__ : Any = None
with xopen(UpperCAmelCase_ , 'rb' , use_auth_token=UpperCAmelCase_) as f:
UpperCamelCase__ : Optional[int] = BytesIO(f.read())
UpperCamelCase__ : Tuple = PIL.Image.open(bytes_)
else:
UpperCamelCase__ : List[Any] = PIL.Image.open(BytesIO(bytes_))
image.load() # to avoid "Too many open files" errors
return image
def __UpperCamelCase ( self : List[str]):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary'),
"path": Value('string'),
}
)
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray]):
if pa.types.is_string(storage.type):
UpperCamelCase__ : Dict = pa.array([None] * len(UpperCAmelCase_) , type=pa.binary())
UpperCamelCase__ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
UpperCamelCase__ : Tuple = pa.array([None] * len(UpperCAmelCase_) , type=pa.string())
UpperCamelCase__ : Optional[int] = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null())
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('bytes') >= 0:
UpperCamelCase__ : Tuple = storage.field('bytes')
else:
UpperCamelCase__ : Tuple = pa.array([None] * len(UpperCAmelCase_) , type=pa.binary())
if storage.type.get_field_index('path') >= 0:
UpperCamelCase__ : int = storage.field('path')
else:
UpperCamelCase__ : List[str] = pa.array([None] * len(UpperCAmelCase_) , type=pa.string())
UpperCamelCase__ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null())
elif pa.types.is_list(storage.type):
UpperCamelCase__ : Optional[Any] = pa.array(
[encode_np_array(np.array(UpperCAmelCase_))['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
UpperCamelCase__ : Optional[Any] = pa.array([None] * len(UpperCAmelCase_) , type=pa.string())
UpperCamelCase__ : Optional[int] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null())
return array_cast(UpperCAmelCase_ , self.pa_type)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : pa.StructArray):
@no_op_if_value_is_null
def path_to_bytes(UpperCAmelCase_ : int):
with xopen(UpperCAmelCase_ , 'rb') as f:
UpperCamelCase__ : List[str] = f.read()
return bytes_
UpperCamelCase__ : Optional[Any] = pa.array(
[
(path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCamelCase__ : List[Any] = pa.array(
[os.path.basename(UpperCAmelCase_) if path is not None else None for path in storage.field('path').to_pylist()] , type=pa.string() , )
UpperCamelCase__ : str = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null())
return array_cast(UpperCAmelCase_ , self.pa_type)
def __UpperCAmelCase ( ) -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.')
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ : Tuple = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
return _IMAGE_COMPRESSION_FORMATS
def __UpperCAmelCase ( lowerCamelCase_) -> bytes:
UpperCamelCase__ : List[Any] = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ : Dict = image.format
else:
UpperCamelCase__ : List[str] = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(lowerCamelCase_ , format=lowerCamelCase_)
return buffer.getvalue()
def __UpperCAmelCase ( lowerCamelCase_) -> dict:
if hasattr(lowerCamelCase_ , 'filename') and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(lowerCamelCase_)}
def __UpperCAmelCase ( lowerCamelCase_) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.')
UpperCamelCase__ : List[Any] = array.dtype
UpperCamelCase__ : Union[str, Any] = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
UpperCamelCase__ : Optional[int] = dtype.kind
UpperCamelCase__ : int = dtype.itemsize
UpperCamelCase__ : List[str] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ : Union[str, Any] = np.dtype('|u1')
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.')
if dtype is not dest_dtype:
warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'')
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ : Union[str, Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ : List[str] = dtype_byteorder + dtype_kind + str(lowerCamelCase_)
UpperCamelCase__ : Any = np.dtype(lowerCamelCase_)
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'')
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}')
UpperCamelCase__ : Optional[int] = PIL.Image.fromarray(array.astype(lowerCamelCase_))
return {"path": None, "bytes": image_to_bytes(lowerCamelCase_)}
def __UpperCAmelCase ( lowerCamelCase_) -> List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.')
if objs:
UpperCamelCase__, UpperCamelCase__ : str = first_non_null_value(lowerCamelCase_)
if isinstance(lowerCamelCase_ , lowerCamelCase_):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(lowerCamelCase_ , np.ndarray):
UpperCamelCase__ : List[str] = no_op_if_value_is_null(lowerCamelCase_)
return [obj_to_image_dict_func(lowerCamelCase_) for obj in objs]
elif isinstance(lowerCamelCase_ , PIL.Image.Image):
UpperCamelCase__ : Union[str, Any] = no_op_if_value_is_null(lowerCamelCase_)
return [obj_to_image_dict_func(lowerCamelCase_) for obj in objs]
else:
return objs
else:
return objs
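# Illustrative round trip (a sketch, not an API guarantee): the helpers above
# turn a NumPy array into {'bytes': ..., 'path': ...} storage, and the
# feature's decode path opens it back into a PIL image. Method names below
# follow the upstream `datasets` API; the defs above carry placeholder names.
# import numpy as np
# arr = np.zeros((32, 32, 3), dtype=np.uint8)  # multi-channel, coerced to |u1
# encoded = encode_np_array(arr)               # {'path': None, 'bytes': b'\x89PNG...'}
# img = Image().decode_example(encoded)        # PIL.Image.Image of size 32x32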
| 6 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[str]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Union[str, Any] = 1
UpperCamelCase__ : Union[str, Any] = 3
UpperCamelCase__ : Dict = (32, 32)
UpperCamelCase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCAmelCase_)
return image
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCamelCase ( self : str):
torch.manual_seed(0)
UpperCamelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[Any]):
        def extract(*args : List[Any] , **kwargs : Dict):
class __lowercase :
def __init__( self : List[Any]):
UpperCamelCase__ : Optional[Any] = torch.ones([0])
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int):
self.pixel_values.to(UpperCAmelCase_)
return self
return Out()
return extract
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Any = self.dummy_cond_unet
UpperCamelCase__ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
UpperCamelCase__ : List[str] = self.dummy_vae
UpperCamelCase__ : str = self.dummy_text_encoder
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Tuple = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : int = self.dummy_cond_unet
UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.dummy_vae
UpperCamelCase__ : Optional[int] = self.dummy_text_encoder
UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Dict = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert isinstance(pipe.scheduler , UpperCAmelCase_)
assert pipe.safety_checker is None
UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Dict = self.dummy_cond_unet
UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Any = self.dummy_vae
UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder
UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# put models in fp16
UpperCamelCase__ : Any = unet.half()
UpperCamelCase__ : Tuple = vae.half()
UpperCamelCase__ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
UpperCamelCase__ : Any = 4_003_660_346
UpperCamelCase__ : Any = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : str = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : str = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity'
UpperCamelCase__ : Tuple = 2_734_971_755
UpperCamelCase__ : Tuple = 7
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : int = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : int = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : int = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
UpperCamelCase__ : Any = 1_044_355_234
UpperCamelCase__ : Optional[int] = 12
UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : List[str] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
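# Self-contained sketch of the slice-comparison pattern used throughout these
# tests; the values below are dummies, not frozen references from real runs.
import numpy as np
image = np.zeros((1, 512, 512, 3), dtype=np.float32)  # stand-in for pipeline output
image_slice = image[0, -3:, -3:, -1]  # bottom-right 3x3 patch of the last channel
expected_slice = np.zeros(9, dtype=np.float32)
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2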
| 6 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]:
# Initialise PyTorch model
UpperCamelCase__ : List[str] = MobileBertConfig.from_json_file(lowerCamelCase_)
print(f'Building PyTorch model from configuration: {config}')
UpperCamelCase__ : Dict = MobileBertForPreTraining(lowerCamelCase_)
# Load weights from tf checkpoint
UpperCamelCase__ : Union[str, Any] = load_tf_weights_in_mobilebert(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}')
torch.save(model.state_dict() , lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
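# Hedged usage note (paths are placeholders):
# python <this_script>.py \
#     --tf_checkpoint_path /path/to/mobilebert/model.ckpt \
#     --mobilebert_config_file /path/to/mobilebert_config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin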
| 6 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowerCAmelCase__ = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
lowerCAmelCase__ = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __UpperCAmelCase ( ) -> Union[str, Any]:
UpperCamelCase__ : Optional[Any] = (
list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1))
)
UpperCamelCase__ : List[Any] = bs[:]
UpperCamelCase__ : Optional[int] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase_)
cs.append(2**8 + n)
n += 1
UpperCamelCase__ : Union[str, Any] = [chr(lowerCamelCase_) for n in cs]
return dict(zip(lowerCamelCase_ , lowerCamelCase_))
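# Worked example: bytes outside the printable ranges above are remapped to
# 256 + n in order of appearance, so the space byte 32 becomes
# chr(256 + 32) == 'Ġ', the familiar leading-space marker in byte-level BPE
# vocabularies.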
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Any = set()
UpperCamelCase__ : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
UpperCamelCase__ : str = char
return pairs
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[Any] , ):
UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
UpperCamelCase__ : Any = json.load(UpperCAmelCase_)
UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ : Any = errors # how to handle errors in decoding
UpperCamelCase__ : Tuple = bytes_to_unicode()
UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle:
UpperCamelCase__ : List[Any] = merges_handle.read().split('\n')[1:-1]
UpperCamelCase__ : List[Any] = [tuple(merge.split()) for merge in bpe_merges]
UpperCamelCase__ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Dict = {}
UpperCamelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase__ : Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple):
return len(self.encoder)
def __UpperCamelCase ( self : Tuple):
return dict(self.encoder , **self.added_tokens_encoder)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]):
if token in self.cache:
return self.cache[token]
UpperCamelCase__ : Optional[int] = tuple(UpperCAmelCase_)
UpperCamelCase__ : int = get_pairs(UpperCAmelCase_)
if not pairs:
return token
while True:
UpperCamelCase__ : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCAmelCase_ , float('inf')))
if bigram not in self.bpe_ranks:
break
UpperCamelCase__, UpperCamelCase__ : Tuple = bigram
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Optional[int] = 0
while i < len(UpperCAmelCase_):
try:
UpperCamelCase__ : Tuple = word.index(UpperCAmelCase_ , UpperCAmelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCamelCase__ : Any = j
if word[i] == first and i < len(UpperCAmelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCamelCase__ : List[str] = tuple(UpperCAmelCase_)
UpperCamelCase__ : Dict = new_word
if len(UpperCAmelCase_) == 1:
break
else:
UpperCamelCase__ : Optional[int] = get_pairs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = word
return word
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any):
UpperCamelCase__ : Optional[Any] = []
for token in re.findall(self.pat , UpperCAmelCase_):
UpperCamelCase__ : Optional[int] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' '))
return bpe_tokens
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int]):
return self.decoder.get(UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int):
UpperCamelCase__ : int = ''.join(UpperCAmelCase_)
UpperCamelCase__ : Any = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
if not os.path.isdir(UpperCAmelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase__ : str = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n')
UpperCamelCase__ : str = 0
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!')
UpperCamelCase__ : List[Any] = token_index
writer.write(' '.join(UpperCAmelCase_) + '\n')
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_)) + [1]
return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1]
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : Any = [self.sep_token_id]
UpperCamelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_) > 0 and not text[0].isspace()):
UpperCamelCase__ : str = ' ' + text
return (text, kwargs)
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"):
UpperCamelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text)
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : int = self.encode(UpperCAmelCase_)
if len(UpperCAmelCase_) > self.model_max_length:
UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
return input_ids
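# Self-contained toy version of the BPE merge loop above (a sketch; the real
# tokenizer reads its merge ranks from merges.txt):
def toy_bpe(token: str, ranks: dict) -> str:
    word = tuple(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda pair: ranks.get(pair, float('inf')))
        if bigram not in ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return ' '.join(word)

assert toy_bpe('hello', {('h', 'e'): 0, ('he', 'l'): 1}) == 'hel l o'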
| 6 | 1 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCAmelCase__ = logging.get_logger(__name__)
class __lowercase :
def __init__( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int):
UpperCamelCase__ : str = question_encoder
UpperCamelCase__ : Optional[int] = generator
UpperCamelCase__ : Optional[int] = self.question_encoder
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Any):
if os.path.isfile(UpperCAmelCase_):
raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file')
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_)
UpperCamelCase__ : int = os.path.join(UpperCAmelCase_ , 'question_encoder_tokenizer')
UpperCamelCase__ : Any = os.path.join(UpperCAmelCase_ , 'generator_tokenizer')
self.question_encoder.save_pretrained(UpperCAmelCase_)
self.generator.save_pretrained(UpperCAmelCase_)
@classmethod
def __UpperCamelCase ( cls : str , UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
UpperCamelCase__ : Tuple = kwargs.pop('config' , UpperCAmelCase_)
if config is None:
UpperCamelCase__ : int = RagConfig.from_pretrained(UpperCAmelCase_)
UpperCamelCase__ : int = AutoTokenizer.from_pretrained(
UpperCAmelCase_ , config=config.question_encoder , subfolder='question_encoder_tokenizer')
UpperCamelCase__ : Dict = AutoTokenizer.from_pretrained(
UpperCAmelCase_ , config=config.generator , subfolder='generator_tokenizer')
return cls(question_encoder=UpperCAmelCase_ , generator=UpperCAmelCase_)
def __call__( self : str , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
return self.current_tokenizer(*UpperCAmelCase_ , **UpperCAmelCase_)
def __UpperCamelCase ( self : str , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str):
return self.generator.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_)
def __UpperCamelCase ( self : Tuple , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Dict):
return self.generator.decode(*UpperCAmelCase_ , **UpperCAmelCase_)
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : List[Any] = self.question_encoder
def __UpperCamelCase ( self : str):
UpperCamelCase__ : str = self.generator
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[List[str]] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : str = "longest" , UpperCAmelCase_ : str = None , UpperCAmelCase_ : bool = True , **UpperCAmelCase_ : Tuple , ):
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' , UpperCAmelCase_ , )
if max_length is None:
UpperCamelCase__ : int = self.current_tokenizer.model_max_length
UpperCamelCase__ : List[Any] = self(
UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , **UpperCAmelCase_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
UpperCamelCase__ : List[Any] = self.current_tokenizer.model_max_length
UpperCamelCase__ : Tuple = self(
text_target=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , padding=UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ , **UpperCAmelCase_ , )
UpperCamelCase__ : List[str] = labels['input_ids']
return model_inputs
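# Hedged usage sketch: the class above mirrors upstream's RagTokenizer (it
# wraps a question-encoder tokenizer and a generator tokenizer and delegates
# __call__ to whichever is current); the model id below is illustrative.
# tokenizer = RagTokenizer.from_pretrained('facebook/rag-token-nq')
# inputs = tokenizer('who holds the record in 100m freestyle', return_tensors='pt')
# answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)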
| 6 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def __UpperCAmelCase ( lowerCamelCase_ = "AAPL") -> str:
UpperCamelCase__ : str = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
UpperCamelCase__ : Optional[Any] = BeautifulSoup(requests.get(lowerCamelCase_).text , 'html.parser')
UpperCamelCase__ : Union[str, Any] = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
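# Hedged note: the class name above is tied to Yahoo Finance's markup and
# breaks whenever the page is redesigned; a selector keyed on data attributes
# (an assumption about the current page, not guaranteed to be stable) is
# usually sturdier:
# soup.find('fin-streamer', {'data-symbol': symbol, 'data-field': 'regularMarketPrice'})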
| 6 | 1 |
'''simple docstring'''
import string
def __UpperCAmelCase ( lowerCamelCase_) -> str:
UpperCamelCase__ : Optional[int] = ''
for i in sequence:
UpperCamelCase__ : str = ord(lowerCamelCase_)
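        # ord('A') + ord('Z') == 155 and ord('a') + ord('z') == 219, so the two
        # branches below mirror a letter across its own case range (A<->Z, a<->z).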
if 65 <= extract <= 90:
output += chr(155 - extract)
elif 97 <= extract <= 122:
output += chr(219 - extract)
else:
output += i
return output
def __UpperCAmelCase ( lowerCamelCase_) -> str:
UpperCamelCase__ : int = string.ascii_letters
UpperCamelCase__ : Dict = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowerCamelCase_)] if c in letters else c for c in sequence)
def __UpperCAmelCase ( ) -> None:
from timeit import timeit
print('Running performance benchmarks...')
UpperCamelCase__ : List[str] = 'from string import printable ; from __main__ import atbash, atbash_slow'
print(f'> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowerCamelCase_)} seconds')
print(f'> atbash(): {timeit("atbash(printable)" , setup=lowerCamelCase_)} seconds')
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 6 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
model.to(UpperCAmelCase_)
from datasets import load_dataset
UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo')
UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB')
UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_)
UpperCamelCase__ : Tuple = outputs.logits
UpperCamelCase__ : str = torch.Size((1, 16))
self.assertEqual(logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Tuple = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
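# Hedged note: microsoft/dit-base-finetuned-rvlcdip is fine-tuned on the 16
# document classes of RVL-CDIP, hence the (1, 16) logits shape checked above;
# the predicted class would be logits.argmax(-1).item().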
| 6 | 1 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCAmelCase ( lowerCamelCase_) -> List[Tuple[int, ...]]:
UpperCamelCase__ : int = []
if isinstance(lowerCamelCase_ , lowerCamelCase_):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , (list, tuple)):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , torch.Tensor):
shapes.append(tree.shape)
else:
raise ValueError('Not supported')
return shapes
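# Illustrative: for {'a': torch.zeros(2, 3), 'b': [torch.zeros(4)]} the
# traversal above returns [torch.Size([2, 3]), torch.Size([4])].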
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple[int, ...]:
UpperCamelCase__ : int = []
for d in reversed(lowerCamelCase_):
idx.append(flat_idx % d)
UpperCamelCase__ : Any = flat_idx // d
return tuple(reversed(lowerCamelCase_))
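# Worked example: _flat_idx_to_idx(5, (2, 3)) peels dims right to left:
# 5 % 3 == 2 and 5 // 3 == 1, then 1 % 2 == 1, giving (1, 2); flat index 5
# in a row-major 2x3 grid is row 1, column 2.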
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
    def reduce_edge_list(l) -> None:
        UpperCamelCase__ : Tuple = True
        for i in range(len(l)):
UpperCamelCase__ : List[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCamelCase__ : Optional[Any] = l[reversed_idx]
if start_edges is None:
UpperCamelCase__ : int = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase_)
if end_edges is None:
UpperCamelCase__ : List[str] = [e == (d - 1) for e, d in zip(lowerCamelCase_ , lowerCamelCase_)]
reduce_edge_list(lowerCamelCase_)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase_) == 0:
return [()]
elif len(lowerCamelCase_) == 1:
return [(slice(start[0] , end[0] + 1),)]
UpperCamelCase__ : List[Tuple[slice, ...]] = []
UpperCamelCase__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase_ , lowerCamelCase_):
if s == e:
path_list.append(slice(lowerCamelCase_ , s + 1))
else:
break
UpperCamelCase__ : Tuple[slice, ...] = tuple(lowerCamelCase_)
UpperCamelCase__ : Dict = len(lowerCamelCase_)
# start == end, and we're done
if divergence_idx == len(lowerCamelCase_):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : str = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ))
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : Optional[int] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
slices.extend(lower())
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper())
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper())
UpperCamelCase__ : Dict = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
slices.extend(lower())
return slices
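# Illustration of the slicing above: for dims (2, 3), covering flat indices
# 0..4 corresponds to start = (0, 0) and end = (1, 1), and the function
# returns [(slice(0, 1),), (slice(1, 2), slice(0, 2))] -- all of row 0 in one
# chunk plus the first two entries of row 1.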
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> torch.Tensor:
UpperCamelCase__ : List[Any] = t.shape[:no_batch_dims]
UpperCamelCase__ : Optional[int] = list(_flat_idx_to_idx(lowerCamelCase_ , lowerCamelCase_))
# _get_minimal_slice_set is inclusive
UpperCamelCase__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase_))
# Get an ordered list of slices to perform
UpperCamelCase__ : int = _get_minimal_slice_set(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
UpperCamelCase__ : List[Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Any:
if not (len(lowerCamelCase_) > 0):
raise ValueError('Must provide at least one input')
UpperCamelCase__ : int = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase_)]
UpperCamelCase__ : int = tuple([max(s) for s in zip(*lowerCamelCase_)])
def _prep_inputs(t) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
UpperCamelCase__ : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
UpperCamelCase__ : Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:])
else:
UpperCamelCase__ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
return t
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase_)
UpperCamelCase__ : int = None
if _out is not None:
UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
UpperCamelCase__ : Dict = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCamelCase__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(t) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[Any] = prepped_outputs
for _ in range(lowerCamelCase_):
# Chunk the input
if not low_mem:
UpperCamelCase__ : str = _select_chunk
else:
UpperCamelCase__ : List[Any] = partial(
_chunk_slice , flat_start=lowerCamelCase_ , flat_end=min(lowerCamelCase_ , i + chunk_size) , no_batch_dims=len(lowerCamelCase_) , )
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase_ , lowerCamelCase_)
# Run the layer on the chunk
UpperCamelCase__ : List[Any] = layer(**lowerCamelCase_)
# Allocate space for the output
if out is None:
UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , lowerCamelCase_)
# Put the chunk in its pre-allocated space
if isinstance(output_chunk , dict):
def assign(da , db) -> None:
for k, v in da.items():
if isinstance(v , dict):
assign(v , db[k])
else:
if _add_into_out:
v[i : i + chunk_size] += db[k]
else:
v[i : i + chunk_size] = db[k]
assign(out , output_chunk)
elif isinstance(output_chunk , tuple):
for xa, xb in zip(out , output_chunk):
if _add_into_out:
xa[i : i + chunk_size] += xb
else:
xa[i : i + chunk_size] = xb
elif isinstance(output_chunk , torch.Tensor):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
out[i : i + chunk_size] = output_chunk
else:
raise ValueError('Not supported')
i += chunk_size
UpperCamelCase__ : int = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]) , lowerCamelCase_)
return out
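# A hedged usage sketch (the positional parameters are assumed to be
# (layer, inputs, chunk_size, no_batch_dims), matching the OpenFold-style
# chunk_layer helper this code mirrors):
#     layer = lambda x: {"out": x * 2}
#     inputs = {"x": torch.randn(4, 8, 16)}
#     out = chunk_layer(layer , inputs , chunk_size=4 , no_batch_dims=2)
#     # out["out"] keeps the original (4, 8) batch dims: the batch is
#     # flattened to 32 rows, processed 4 at a time, then reshaped back.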
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : int = 512 , ):
UpperCamelCase__ : str = max_chunk_size
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[tuple] = None
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int):
logging.info('Tuning chunk size...')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCamelCase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
UpperCamelCase__ : List[Any] = [c for c in candidates if c > min_chunk_size]
UpperCamelCase__ : List[Any] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(UpperCAmelCase_ : int) -> bool:
try:
with torch.no_grad():
fn(*UpperCAmelCase_ , chunk_size=UpperCAmelCase_)
return True
except RuntimeError:
return False
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : Dict = len(UpperCAmelCase_) - 1
while i > min_viable_chunk_size_index:
UpperCamelCase__ : Optional[int] = test_chunk_size(candidates[i])
if not viable:
UpperCamelCase__ : Tuple = (min_viable_chunk_size_index + i) // 2
else:
UpperCamelCase__ : Optional[int] = i
UpperCamelCase__ : Dict = (i + len(UpperCAmelCase_) - 1) // 2
return candidates[min_viable_chunk_size_index]
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Iterable , UpperCAmelCase_ : Iterable):
UpperCamelCase__ : List[str] = True
for aa, ab in zip(UpperCAmelCase_ , UpperCAmelCase_):
assert type(aa) == type(ab)
if isinstance(aa , (list, tuple)):
consistent &= self._compare_arg_caches(aa , ab)
elif isinstance(aa , dict):
UpperCamelCase__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda x: x[0])]
UpperCamelCase__ : str = [v for _, v in sorted(ab.items() , key=lambda x: x[0])]
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
else:
consistent &= aa == ab
return consistent
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int , ):
UpperCamelCase__ : List[Any] = True
UpperCamelCase__ : tuple = tree_map(lambda a: a.shape if isinstance(a , torch.Tensor) else a , UpperCAmelCase_ , UpperCAmelCase_)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase_)
else:
# No cached data yet, so nothing can be reused and tuning is required
UpperCamelCase__ : Optional[int] = False
if not consistent:
UpperCamelCase__ : Tuple = self._determine_favorable_chunk_size(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
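# A hedged usage sketch (class and method names are obfuscated above; upstream
# this is a ChunkSizeTuner whose last method is tune_chunk_size):
#     tuner = ChunkSizeTuner(max_chunk_size=512)
#     chunk_size = tuner.tune_chunk_size(fn , args , min_chunk_size=1)
# The tuned value is cached and re-tuned only when the argument shapes change.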
| 6 |
'''simple docstring'''
import argparse
import struct
import unittest
class __lowercase :
def __init__( self : Tuple , UpperCAmelCase_ : bytes):
UpperCamelCase__ : Dict = data
# Initialize hash values
UpperCamelCase__ : Any = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
UpperCamelCase__ : List[Any] = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
UpperCamelCase__ : Tuple = self.preprocessing(self.data)
self.final_hash()
@staticmethod
def __UpperCamelCase ( UpperCAmelCase_ : bytes):
UpperCamelCase__ : List[Any] = B'\x80' + (B'\x00' * (63 - (len(UpperCAmelCase_) + 8) % 64))
UpperCamelCase__ : List[Any] = struct.pack('>Q' , (len(UpperCAmelCase_) * 8))
return data + padding + big_endian_integer
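# Worked example: preprocessing(b'abc') returns exactly one 64-byte block --
# the 3 message bytes, the 0x80 marker, 52 zero bytes of padding, and the
# 8-byte big-endian bit length (24).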
def __UpperCamelCase ( self : Union[str, Any]):
# Convert into blocks of 64 bytes
UpperCamelCase__ : int = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data) , 64)
]
for block in self.blocks:
# Convert the given block into a list of sixteen 4-byte integers
UpperCamelCase__ : Tuple = list(struct.unpack('>16L' , UpperCAmelCase_))
# extend with 48 zeroed integers to hold the full 64-word message schedule
words += [0] * 48
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = self.hashes
for index in range(0 , 64):
if index > 15:
# modify the zero-ed indexes at the end of the array
UpperCamelCase__ : Dict = (
self.ror(words[index - 15] , 7)
^ self.ror(words[index - 15] , 18)
^ (words[index - 15] >> 3)
)
UpperCamelCase__ : Tuple = (
self.ror(words[index - 2] , 17)
^ self.ror(words[index - 2] , 19)
^ (words[index - 2] >> 10)
)
UpperCamelCase__ : int = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X100_000_000
# Compression
UpperCamelCase__ : Optional[Any] = self.ror(UpperCAmelCase_ , 6) ^ self.ror(UpperCAmelCase_ , 11) ^ self.ror(UpperCAmelCase_ , 25)
UpperCamelCase__ : List[str] = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g)
UpperCamelCase__ : List[Any] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X100_000_000
UpperCamelCase__ : List[str] = self.ror(UpperCAmelCase_ , 2) ^ self.ror(UpperCAmelCase_ , 13) ^ self.ror(UpperCAmelCase_ , 22)
UpperCamelCase__ : Dict = (a & b) ^ (a & c) ^ (b & c)
UpperCamelCase__ : List[str] = (sa + maj) % 0X100_000_000
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = (
g,
f,
e,
((d + tempa) % 0X100_000_000),
c,
b,
a,
((tempa + tempb) % 0X100_000_000),
)
UpperCamelCase__ : List[Any] = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCamelCase__ : Optional[Any] = [
((element + mutated_hash_values[index]) % 0X100_000_000)
for index, element in enumerate(self.hashes)
]
UpperCamelCase__ : Any = ''.join([hex(value)[2:].zfill(8) for value in self.hashes])
def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
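# Quick sanity check for the 32-bit right-rotation above:
#     self.ror(1 , 1)           # -> 0x80000000
#     self.ror(0x80000000 , 31) # -> 1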
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : int):
import hashlib
UpperCamelCase__ : str = bytes('Test String' , 'utf-8')
self.assertEqual(SHAaaa(UpperCAmelCase_).hash , hashlib.shaaaa(UpperCAmelCase_).hexdigest())
def __UpperCAmelCase ( ) -> None:
import doctest
doctest.testmod()
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file')
UpperCamelCase__ : List[str] = parser.parse_args()
UpperCamelCase__ : str = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb') as f:
UpperCamelCase__ : Any = f.read()
else:
UpperCamelCase__ : List[Any] = bytes(lowerCamelCase_ , 'utf-8')
print(SHAaaa(lowerCamelCase_).hash)
if __name__ == "__main__":
main()
| 6 | 1 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __lowercase (unittest.TestCase , __lowerCamelCase ):
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Union[str, Any] = load_tool('text-to-speech')
self.tool.setup()
def __UpperCamelCase ( self : str):
# SpeechT5 isn't deterministic
torch.manual_seed(0)
UpperCamelCase__ : Tuple = self.tool('hey')
UpperCamelCase__ : Optional[int] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85]) , ))
def __UpperCamelCase ( self : Union[str, Any]):
# SpeechT5 isn't deterministic
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = self.tool('hey')
UpperCamelCase__ : List[str] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85]) , ))
| 6 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase__ = 300 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive')
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive')
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive')
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration')
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration')
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
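# Worked example: with donor_conc = 1e17, acceptor_conc = 1e17 and
# intrinsic_conc = 1.5e10 (typical silicon values in cm^-3), this evaluates
# kT/q * ln(Nd * Na / ni^2) at T = 300 K to roughly 0.81 V.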
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 1 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
lowerCAmelCase__ = logging.get_logger(__name__)
# General docstring
lowerCAmelCase__ = 'PoolFormerConfig'
# Base docstring
lowerCAmelCase__ = 'sail/poolformer_s12'
lowerCAmelCase__ = [1, 512, 7, 7]
# Image classification docstring
lowerCAmelCase__ = 'sail/poolformer_s12'
lowerCAmelCase__ = 'tabby, tabby cat'
lowerCAmelCase__ = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = 0.0 , lowerCamelCase_ = False) -> Union[str, Any]:
if drop_prob == 0.0 or not training:
return input
UpperCamelCase__ : Optional[Any] = 1 - drop_prob
UpperCamelCase__ : Dict = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
UpperCamelCase__ : Dict = keep_prob + torch.rand(lowerCamelCase_ , dtype=input.dtype , device=input.device)
random_tensor.floor_() # binarize
UpperCamelCase__ : int = input.div(lowerCamelCase_) * random_tensor
return output
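# Note: this is standard stochastic depth. Dividing by keep_prob rescales the
# surviving samples so the expected activation is unchanged: with
# drop_prob = 0.2 in training, a sample's residual branch is zeroed with
# probability 0.2 and the survivors are scaled by 1 / 0.8.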
class __lowercase (nn.Module ):
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[float] = None):
super().__init__()
UpperCamelCase__ : Any = drop_prob
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : torch.Tensor):
return drop_path(UpperCAmelCase_ , self.drop_prob , self.training)
def __UpperCamelCase ( self : Any):
return "p={}".format(self.drop_prob)
class __lowercase (nn.Module ):
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=None):
super().__init__()
UpperCamelCase__ : Dict = patch_size if isinstance(UpperCAmelCase_ , collections.abc.Iterable) else (patch_size, patch_size)
UpperCamelCase__ : Dict = stride if isinstance(UpperCAmelCase_ , collections.abc.Iterable) else (stride, stride)
UpperCamelCase__ : Optional[Any] = padding if isinstance(UpperCAmelCase_ , collections.abc.Iterable) else (padding, padding)
UpperCamelCase__ : Dict = nn.Convad(UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=UpperCAmelCase_ , stride=UpperCAmelCase_ , padding=UpperCAmelCase_)
UpperCamelCase__ : int = norm_layer(UpperCAmelCase_) if norm_layer else nn.Identity()
def __UpperCamelCase ( self : str , UpperCAmelCase_ : List[str]):
UpperCamelCase__ : Optional[int] = self.projection(UpperCAmelCase_)
UpperCamelCase__ : Dict = self.norm(UpperCAmelCase_)
return embeddings
class __lowercase (nn.GroupNorm ):
def __init__( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple):
super().__init__(1 , UpperCAmelCase_ , **UpperCAmelCase_)
class __lowercase (nn.Module ):
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any):
super().__init__()
UpperCamelCase__ : Optional[int] = nn.AvgPoolad(UpperCAmelCase_ , stride=1 , padding=pool_size // 2 , count_include_pad=UpperCAmelCase_)
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int]):
return self.pool(UpperCAmelCase_) - hidden_states
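# Note: subtracting the input realizes PoolFormer's "pooling as token mixing"
# with the identity removed; the surrounding block's residual connection adds
# the input back, so the net effect is plain average pooling.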
class __lowercase (nn.Module ):
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]):
super().__init__()
UpperCamelCase__ : Union[str, Any] = nn.Convad(UpperCAmelCase_ , UpperCAmelCase_ , 1)
UpperCamelCase__ : str = nn.Convad(UpperCAmelCase_ , UpperCAmelCase_ , 1)
UpperCamelCase__ : Dict = PoolFormerDropPath(UpperCAmelCase_)
if isinstance(config.hidden_act , UpperCAmelCase_):
UpperCamelCase__ : Tuple = ACTaFN[config.hidden_act]
else:
UpperCamelCase__ : int = config.hidden_act
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : Dict = self.conva(UpperCAmelCase_)
UpperCamelCase__ : List[str] = self.act_fn(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = self.drop(UpperCAmelCase_)
UpperCamelCase__ : Dict = self.conva(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = self.drop(UpperCAmelCase_)
return hidden_states
class __lowercase (nn.Module ):
def __init__( self : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any):
super().__init__()
UpperCamelCase__ : Any = PoolFormerPooling(UpperCAmelCase_)
UpperCamelCase__ : Dict = PoolFormerOutput(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : List[str] = PoolFormerGroupNorm(UpperCAmelCase_)
UpperCamelCase__ : str = PoolFormerGroupNorm(UpperCAmelCase_)
# Useful for training neural nets
UpperCamelCase__ : str = PoolFormerDropPath(UpperCAmelCase_) if drop_path > 0.0 else nn.Identity()
UpperCamelCase__ : Optional[Any] = config.use_layer_scale
if config.use_layer_scale:
UpperCamelCase__ : Union[str, Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((UpperCAmelCase_)) , requires_grad=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((UpperCAmelCase_)) , requires_grad=UpperCAmelCase_)
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : str):
if self.use_layer_scale:
UpperCamelCase__ : Dict = self.pooling(self.before_norm(UpperCAmelCase_))
UpperCamelCase__ : int = self.layer_scale_a.unsqueeze(-1).unsqueeze(-1) * pooling_output
# First residual connection
UpperCamelCase__ : Tuple = hidden_states + self.drop_path(UpperCAmelCase_)
UpperCamelCase__ : int = ()
UpperCamelCase__ : List[str] = self.output(self.after_norm(UpperCAmelCase_))
UpperCamelCase__ : Optional[Any] = self.layer_scale_a.unsqueeze(-1).unsqueeze(-1) * layer_output
# Second residual connection
UpperCamelCase__ : Tuple = hidden_states + self.drop_path(UpperCAmelCase_)
UpperCamelCase__ : Dict = (output,) + outputs
return outputs
else:
UpperCamelCase__ : Union[str, Any] = self.drop_path(self.pooling(self.before_norm(UpperCAmelCase_)))
# First residual connection
UpperCamelCase__ : Optional[Any] = pooling_output + hidden_states
UpperCamelCase__ : Dict = ()
# Second residual connection inside the PoolFormerOutput block
UpperCamelCase__ : int = self.drop_path(self.output(self.after_norm(UpperCAmelCase_)))
UpperCamelCase__ : int = hidden_states + layer_output
UpperCamelCase__ : Optional[int] = (output,) + outputs
return outputs
class __lowercase (nn.Module ):
def __init__( self : Dict , UpperCAmelCase_ : Optional[Any]):
super().__init__()
UpperCamelCase__ : Optional[int] = config
# stochastic depth decay rule
UpperCamelCase__ : Optional[Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths))]
# patch embeddings
UpperCamelCase__ : List[str] = []
for i in range(config.num_encoder_blocks):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ))
UpperCamelCase__ : Union[str, Any] = nn.ModuleList(UpperCAmelCase_)
# Transformer blocks
UpperCamelCase__ : Optional[int] = []
UpperCamelCase__ : Union[str, Any] = 0
for i in range(config.num_encoder_blocks):
# each block consists of layers
UpperCamelCase__ : Dict = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i]):
layers.append(
PoolFormerLayer(
UpperCAmelCase_ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio) , drop_path=dpr[cur + j] , ))
blocks.append(nn.ModuleList(UpperCAmelCase_))
UpperCamelCase__ : Union[str, Any] = nn.ModuleList(UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : int=True):
UpperCamelCase__ : Optional[int] = () if output_hidden_states else None
UpperCamelCase__ : List[Any] = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block)):
UpperCamelCase__, UpperCamelCase__ : Optional[int] = layers
# Get patch embeddings from hidden_states
UpperCamelCase__ : str = embedding_layer(UpperCAmelCase_)
# Send the embeddings through the blocks
for _, blk in enumerate(UpperCAmelCase_):
UpperCamelCase__ : str = blk(UpperCAmelCase_)
UpperCamelCase__ : Any = layer_outputs[0]
if output_hidden_states:
UpperCamelCase__ : Optional[int] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase_ , hidden_states=UpperCAmelCase_)
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = PoolFormerConfig
_lowerCamelCase = '''poolformer'''
_lowerCamelCase = '''pixel_values'''
_lowerCamelCase = True
def __UpperCamelCase ( self : int , UpperCAmelCase_ : Dict):
if isinstance(UpperCAmelCase_ , (nn.Linear, nn.Convad)):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCAmelCase_ , nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def __UpperCamelCase ( self : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=False):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : Optional[int] = value
lowerCAmelCase__ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase__ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , __lowerCamelCase , )
class __lowercase (__lowerCamelCase ):
def __init__( self : Tuple , UpperCAmelCase_ : int):
super().__init__(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = config
UpperCamelCase__ : Optional[Any] = PoolFormerEncoder(UpperCAmelCase_)
# Initialize weights and apply final processing
self.post_init()
def __UpperCamelCase ( self : int):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(UpperCAmelCase_)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , ):
UpperCamelCase__ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
UpperCamelCase__ : Tuple = self.encoder(
UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )
UpperCamelCase__ : Dict = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=UpperCAmelCase_ , hidden_states=encoder_outputs.hidden_states , )
class __lowercase (nn.Module ):
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple):
super().__init__()
UpperCamelCase__ : List[str] = nn.Linear(config.hidden_size , config.hidden_size)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : List[Any]):
UpperCamelCase__ : Optional[Any] = self.dense(UpperCAmelCase_)
return output
@add_start_docstrings(
'''
PoolFormer Model transformer with an image classification head on top
''' , __lowerCamelCase , )
class __lowercase (__lowerCamelCase ):
def __init__( self : Optional[int] , UpperCAmelCase_ : Dict):
super().__init__(UpperCAmelCase_)
UpperCamelCase__ : Dict = config.num_labels
UpperCamelCase__ : int = PoolFormerModel(UpperCAmelCase_)
# Final norm
UpperCamelCase__ : int = PoolFormerGroupNorm(config.hidden_sizes[-1])
# Classifier head
UpperCamelCase__ : str = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase_)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[torch.LongTensor] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , ):
UpperCamelCase__ : int = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase__ : int = self.poolformer(
UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )
UpperCamelCase__ : List[Any] = outputs[0]
UpperCamelCase__ : Optional[Any] = self.classifier(self.norm(UpperCAmelCase_).mean([-2, -1]))
UpperCamelCase__ : Any = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCamelCase__ : List[Any] = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCamelCase__ : List[Any] = 'single_label_classification'
else:
UpperCamelCase__ : List[Any] = 'multi_label_classification'
if self.config.problem_type == "regression":
UpperCamelCase__ : Any = MSELoss()
if self.num_labels == 1:
UpperCamelCase__ : Tuple = loss_fct(logits.squeeze() , labels.squeeze())
else:
UpperCamelCase__ : str = loss_fct(UpperCAmelCase_ , UpperCAmelCase_)
elif self.config.problem_type == "single_label_classification":
UpperCamelCase__ : List[Any] = CrossEntropyLoss()
UpperCamelCase__ : List[Any] = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
UpperCamelCase__ : Optional[int] = BCEWithLogitsLoss()
UpperCamelCase__ : Union[str, Any] = loss_fct(UpperCAmelCase_ , UpperCAmelCase_)
if not return_dict:
UpperCamelCase__ : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=UpperCAmelCase_ , logits=UpperCAmelCase_ , hidden_states=outputs.hidden_states)
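# A hedged usage sketch (class names are obfuscated above; they mirror the
# upstream transformers PoolFormer API):
#     config = PoolFormerConfig(num_labels=10)
#     model = PoolFormerForImageClassification(config)
#     logits = model(pixel_values=torch.randn(1, 3, 224, 224)).logits  # (1, 10)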
| 6 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCAmelCase ( tree) -> List[Tuple[int, ...]]:
UpperCamelCase__ : int = []
if isinstance(tree , dict):
for v in tree.values():
shapes.extend(_fetch_dims(v))
elif isinstance(tree , (list, tuple)):
for t in tree:
shapes.extend(_fetch_dims(t))
elif isinstance(tree , torch.Tensor):
shapes.append(tree.shape)
else:
raise ValueError('Not supported')
return shapes
@torch.jit.ignore
def __UpperCAmelCase ( flat_idx , dims) -> Tuple[int, ...]:
UpperCamelCase__ : int = []
for d in reversed(dims):
idx.append(flat_idx % d)
UpperCamelCase__ : Any = flat_idx // d
return tuple(reversed(idx))
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(l) -> None:
UpperCamelCase__ : Tuple = True
for i in range(len(l)):
UpperCamelCase__ : List[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCamelCase__ : Optional[Any] = l[reversed_idx]
if start_edges is None:
UpperCamelCase__ : int = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase_)
if end_edges is None:
UpperCamelCase__ : List[str] = [e == (d - 1) for e, d in zip(lowerCamelCase_ , lowerCamelCase_)]
reduce_edge_list(lowerCamelCase_)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase_) == 0:
return [()]
elif len(lowerCamelCase_) == 1:
return [(slice(start[0] , end[0] + 1),)]
UpperCamelCase__ : List[Tuple[slice, ...]] = []
UpperCamelCase__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase_ , lowerCamelCase_):
if s == e:
path_list.append(slice(lowerCamelCase_ , s + 1))
else:
break
UpperCamelCase__ : Tuple[slice, ...] = tuple(lowerCamelCase_)
UpperCamelCase__ : Dict = len(lowerCamelCase_)
# start == end, and we're done
if divergence_idx == len(lowerCamelCase_):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : str = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ))
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : Optional[int] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
slices.extend(lower())
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper())
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper())
UpperCamelCase__ : Dict = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
slices.extend(lower())
return slices
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> torch.Tensor:
UpperCamelCase__ : List[Any] = t.shape[:no_batch_dims]
UpperCamelCase__ : Optional[int] = list(_flat_idx_to_idx(lowerCamelCase_ , lowerCamelCase_))
# _get_minimal_slice_set is inclusive
UpperCamelCase__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase_))
# Get an ordered list of slices to perform
UpperCamelCase__ : int = _get_minimal_slice_set(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
UpperCamelCase__ : List[Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Any:
if not (len(lowerCamelCase_) > 0):
raise ValueError('Must provide at least one input')
UpperCamelCase__ : int = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase_)]
UpperCamelCase__ : int = tuple([max(s) for s in zip(*lowerCamelCase_)])
def _prep_inputs(t) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
UpperCamelCase__ : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
UpperCamelCase__ : Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:])
else:
UpperCamelCase__ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
return t
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase_)
UpperCamelCase__ : int = None
if _out is not None:
UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
UpperCamelCase__ : Dict = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCamelCase__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(t) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[Any] = prepped_outputs
for _ in range(lowerCamelCase_):
# Chunk the input
if not low_mem:
UpperCamelCase__ : str = _select_chunk
else:
UpperCamelCase__ : List[Any] = partial(
_chunk_slice , flat_start=lowerCamelCase_ , flat_end=min(lowerCamelCase_ , i + chunk_size) , no_batch_dims=len(lowerCamelCase_) , )
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase_ , lowerCamelCase_)
# Run the layer on the chunk
UpperCamelCase__ : List[Any] = layer(**lowerCamelCase_)
# Allocate space for the output
if out is None:
UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , lowerCamelCase_)
# Put the chunk in its pre-allocated space
if isinstance(output_chunk , dict):
def assign(da , db) -> None:
for k, v in da.items():
if isinstance(v , dict):
assign(v , db[k])
else:
if _add_into_out:
v[i : i + chunk_size] += db[k]
else:
v[i : i + chunk_size] = db[k]
assign(out , output_chunk)
elif isinstance(output_chunk , tuple):
for xa, xb in zip(out , output_chunk):
if _add_into_out:
xa[i : i + chunk_size] += xb
else:
xa[i : i + chunk_size] = xb
elif isinstance(output_chunk , torch.Tensor):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
out[i : i + chunk_size] = output_chunk
else:
raise ValueError('Not supported')
i += chunk_size
UpperCamelCase__ : int = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]) , lowerCamelCase_)
return out
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : int = 512 , ):
UpperCamelCase__ : str = max_chunk_size
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[tuple] = None
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int):
logging.info('Tuning chunk size...')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCamelCase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
UpperCamelCase__ : List[Any] = [c for c in candidates if c > min_chunk_size]
UpperCamelCase__ : List[Any] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(UpperCAmelCase_ : int) -> bool:
try:
with torch.no_grad():
fn(*UpperCAmelCase_ , chunk_size=UpperCAmelCase_)
return True
except RuntimeError:
return False
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : Dict = len(UpperCAmelCase_) - 1
while i > min_viable_chunk_size_index:
UpperCamelCase__ : Optional[int] = test_chunk_size(candidates[i])
if not viable:
UpperCamelCase__ : Tuple = (min_viable_chunk_size_index + i) // 2
else:
UpperCamelCase__ : Optional[int] = i
UpperCamelCase__ : Dict = (i + len(UpperCAmelCase_) - 1) // 2
return candidates[min_viable_chunk_size_index]
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Iterable , UpperCAmelCase_ : Iterable):
UpperCamelCase__ : List[str] = True
for aa, ab in zip(UpperCAmelCase_ , UpperCAmelCase_):
assert type(aa) == type(ab)
if isinstance(aa , (list, tuple)):
consistent &= self._compare_arg_caches(aa , ab)
elif isinstance(aa , dict):
UpperCamelCase__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda x: x[0])]
UpperCamelCase__ : str = [v for _, v in sorted(ab.items() , key=lambda x: x[0])]
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
else:
consistent &= aa == ab
return consistent
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int , ):
UpperCamelCase__ : List[Any] = True
UpperCamelCase__ : tuple = tree_map(lambda a: a.shape if isinstance(a , torch.Tensor) else a , UpperCAmelCase_ , UpperCAmelCase_)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase_)
else:
# No cached data yet, so nothing can be reused and tuning is required
UpperCamelCase__ : Optional[int] = False
if not consistent:
UpperCamelCase__ : Tuple = self._determine_favorable_chunk_size(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 6 | 1 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCAmelCase__ = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/'))
UpperCamelCase__ : int = self.diffusers_dir
shutil.copy(
os.path.join(UpperCAmelCase_ , 'src/diffusers/schedulers/scheduling_ddpm.py') , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py') , )
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : List[str] = 'src/diffusers'
shutil.rmtree(self.diffusers_dir)
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int=None):
UpperCamelCase__ : Optional[int] = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
UpperCamelCase__ : List[str] = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
UpperCamelCase__ : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119)
UpperCamelCase__ : Tuple = black.format_str(UpperCAmelCase_ , mode=UpperCAmelCase_)
UpperCamelCase__ : List[str] = os.path.join(self.diffusers_dir , 'new_code.py')
with open(UpperCAmelCase_ , 'w' , newline='\n') as f:
f.write(UpperCAmelCase_)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(UpperCAmelCase_)) == 0)
else:
check_copies.is_copy_consistent(f.name , overwrite=UpperCAmelCase_)
with open(UpperCAmelCase_ , 'r') as f:
self.assertTrue(f.read() , UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : List[Any] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput')
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
# Base copy consistency
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , UpperCAmelCase_ , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , UpperCAmelCase_) , )
# Copy consistency with a really long name
UpperCamelCase__ : List[str] = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , F'{long_class_name}SchedulerOutput' , re.sub('Bert' , UpperCAmelCase_ , UpperCAmelCase_) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , UpperCAmelCase_ , overwrite_result=re.sub('DDPM' , 'Test' , UpperCAmelCase_) , )
| 6 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
UpperCamelCase__ : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_slow.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_)
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_fast.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : int = self.prepare_image_inputs()
UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Any = 'lower newer'
UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[int] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'lower newer'
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_):
processor()
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Union[str, Any] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'lower newer'
UpperCamelCase__ : Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 6 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''pegasus'''
_lowerCamelCase = ['''past_key_values''']
_lowerCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : int , UpperCAmelCase_ : Optional[int]=50_265 , UpperCAmelCase_ : Optional[Any]=1_024 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Optional[int]=4_096 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : Optional[int]=12 , UpperCAmelCase_ : Dict=4_096 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Any=1_024 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : str=False , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Optional[int]=1 , UpperCAmelCase_ : Union[str, Any]=1 , **UpperCAmelCase_ : List[str] , ):
UpperCamelCase__ : Optional[int] = vocab_size
UpperCamelCase__ : Union[str, Any] = max_position_embeddings
UpperCamelCase__ : List[Any] = d_model
UpperCamelCase__ : Tuple = encoder_ffn_dim
UpperCamelCase__ : str = encoder_layers
UpperCamelCase__ : str = encoder_attention_heads
UpperCamelCase__ : Optional[int] = decoder_ffn_dim
UpperCamelCase__ : Dict = decoder_layers
UpperCamelCase__ : Any = decoder_attention_heads
UpperCamelCase__ : Dict = dropout
UpperCamelCase__ : List[Any] = attention_dropout
UpperCamelCase__ : List[Any] = activation_dropout
UpperCamelCase__ : Union[str, Any] = activation_function
UpperCamelCase__ : str = init_std
UpperCamelCase__ : Union[str, Any] = encoder_layerdrop
UpperCamelCase__ : List[Any] = decoder_layerdrop
UpperCamelCase__ : Optional[Any] = use_cache
UpperCamelCase__ : Any = encoder_layers
UpperCamelCase__ : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , forced_eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
@property
def __UpperCamelCase ( self : Dict):
return self.encoder_attention_heads
@property
def __UpperCamelCase ( self : List[Any]):
return self.d_model
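# A minimal instantiation sketch (the class above mirrors the upstream
# transformers PegasusConfig):
#     config = PegasusConfig(d_model=512 , encoder_layers=6 , decoder_layers=6)
#     config.hidden_size  # -> 512, resolved through the attribute map above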
| 6 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def __UpperCAmelCase ( src_path , map_location = "cpu" , save_path = None) -> None:
UpperCamelCase__ : List[Any] = torch.load(src_path , map_location=map_location)
for k, v in tqdm(state_dict.items()):
if not isinstance(lowerCamelCase_ , torch.Tensor):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
UpperCamelCase__ : int = v.half()
if save_path is None: # overwrite src_path
UpperCamelCase__ : List[Any] = src_path
torch.save(lowerCamelCase_ , lowerCamelCase_)
if __name__ == "__main__":
fire.Fire(convert)
| 6 | 1 |
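A short usage sketch for the converter above. The module name fp16_convert is a hypothetical file name for the script; fire also exposes the same parameters on the command line:
# Equivalent CLI call: python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin
from fp16_convert import convert  # hypothetical module name for the script above
convert("pytorch_model.bin", save_path="pytorch_model.fp16.bin")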
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/mbart-large-en-ro': (
            'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
        ),
        'facebook/mbart-large-cc25': (
            'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
        ),
    },
    'tokenizer_file': {
        'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
        'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/mbart-large-en-ro': 1024,
    'facebook/mbart-large-cc25': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str,
            pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory.')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 6 |
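The class above mirrors transformers' fast MBart tokenizer. A usage sketch against the upstream class; downloading the checkpoint from the Hub is assumed to succeed:
from transformers import MBartTokenizerFast
tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
batch = tok("UN Chief Says There Is No Plan to Stop War in Syria", return_tensors="pt")
# Source sequences end with </s> followed by the language code, exactly as
# set_src_lang_special_tokens arranges above.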
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'nvidia/segformer-b0-finetuned-ade-512-512': (
        'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = 'segformer'
    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.',
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    @property
    def default_onnx_opset(self) -> int:
        return 12
| 6 | 1 |
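A sketch instantiating the upstream equivalent of the configuration above; the defaults correspond to the smallest MiT-b0 backbone, and only the label count is changed here:
from transformers import SegformerConfig, SegformerForSemanticSegmentation
config = SegformerConfig(num_labels=150)  # ADE20k-sized head; backbone keeps the b0 defaults above
model = SegformerForSemanticSegmentation(config)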
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
    'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = 'marian'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(
        self,
        vocab_size=58_101,
        decoder_vocab_size=None,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58_100,
        scale_embedding=False,
        pad_token_id=58_100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __UpperCamelCase ( self : List[str]):
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase__ : Dict = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
])
if self.use_past:
UpperCamelCase__ : Optional[int] = {0: 'batch'}
UpperCamelCase__ : List[Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
UpperCamelCase__ : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
UpperCamelCase__ : List[Any] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase_ , direction='inputs')
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCamelCase__ : Optional[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
])
if self.use_past:
UpperCamelCase__, UpperCamelCase__ : Any = self.num_layers
for i in range(UpperCAmelCase_):
UpperCamelCase__ : str = {0: 'batch', 2: 'past_sequence + sequence'}
UpperCamelCase__ : Tuple = {0: 'batch', 2: 'past_sequence + sequence'}
else:
UpperCamelCase__ : Dict = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
])
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __UpperCamelCase ( self : List[str]):
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase__ : int = super().outputs
else:
UpperCamelCase__ : Optional[Any] = super(UpperCAmelCase_ , self).outputs
if self.use_past:
UpperCamelCase__, UpperCamelCase__ : List[str] = self.num_layers
for i in range(UpperCAmelCase_):
UpperCamelCase__ : str = {0: 'batch', 2: 'past_sequence + sequence'}
UpperCamelCase__ : Optional[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[TensorType] = None , ):
UpperCamelCase__ : Optional[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# Generate decoder inputs
UpperCamelCase__ : Tuple = seq_length if not self.use_past else 1
UpperCamelCase__ : int = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
UpperCamelCase__ : int = dict(**UpperCAmelCase_ , **UpperCAmelCase_)
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
UpperCamelCase__, UpperCamelCase__ : List[str] = common_inputs['input_ids'].shape
UpperCamelCase__ : Optional[Any] = common_inputs['decoder_input_ids'].shape[1]
UpperCamelCase__, UpperCamelCase__ : List[str] = self.num_attention_heads
UpperCamelCase__ : List[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase__ : Any = decoder_seq_length + 3
UpperCamelCase__ : str = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCamelCase__ : int = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCAmelCase_ , UpperCAmelCase_)] , dim=1)
UpperCamelCase__ : Tuple = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self.num_layers
UpperCamelCase__ : Optional[Any] = min(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = max(UpperCAmelCase_ , UpperCAmelCase_) - min_num_layers
UpperCamelCase__ : Any = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCAmelCase_):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCAmelCase_),
torch.zeros(UpperCAmelCase_),
torch.zeros(UpperCAmelCase_),
torch.zeros(UpperCAmelCase_),
))
# TODO: test this.
UpperCamelCase__ : Union[str, Any] = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCAmelCase_ , UpperCAmelCase_):
common_inputs["past_key_values"].append((torch.zeros(UpperCAmelCase_), torch.zeros(UpperCAmelCase_)))
return common_inputs
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[TensorType] = None , ):
UpperCamelCase__ : Optional[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
UpperCamelCase__, UpperCamelCase__ : Optional[Any] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
UpperCamelCase__ : str = seqlen + 2
UpperCamelCase__, UpperCamelCase__ : Optional[Any] = self.num_layers
UpperCamelCase__, UpperCamelCase__ : Optional[Any] = self.num_attention_heads
UpperCamelCase__ : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase__ : str = common_inputs['attention_mask'].dtype
UpperCamelCase__ : int = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCAmelCase_ , UpperCAmelCase_ , dtype=UpperCAmelCase_)] , dim=1)
UpperCamelCase__ : Optional[int] = [
(torch.zeros(UpperCAmelCase_), torch.zeros(UpperCAmelCase_)) for _ in range(UpperCAmelCase_)
]
return common_inputs
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase__ : Any = compute_effective_axis_dimension(
UpperCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase__ : Optional[int] = tokenizer.num_special_tokens_to_add(UpperCAmelCase_)
UpperCamelCase__ : List[str] = compute_effective_axis_dimension(
UpperCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase_)
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase__ : Tuple = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size
UpperCamelCase__ : int = dict(tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_))
return common_inputs
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase__ : Tuple = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_)
else:
UpperCamelCase__ : int = self._generate_dummy_inputs_for_causal_lm(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_)
return common_inputs
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict):
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase__ : Any = super()._flatten_past_key_values_(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
else:
UpperCamelCase__ : List[str] = super(UpperCAmelCase_ , self)._flatten_past_key_values_(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Union[str, Any]):
return 1e-4
| 6 |
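The same pattern applies to the Marian configuration above. A short sketch using the upstream class; note that many opus-mt checkpoints use 6 encoder and 6 decoder layers rather than the class defaults:
from transformers import MarianConfig
config = MarianConfig(encoder_layers=6, decoder_layers=6)
print(config.num_attention_heads)  # 16 -- resolved via attribute_map to encoder_attention_heads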
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 6 | 1 |
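A quick check of the character n-gram helper above:
print(create_ngram("I am a sentence", 2))
# ['I ', ' a', 'am', 'm ', ...] -- every overlapping 2-character slice of the input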
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase__ = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = ['''pixel_values''']
def __init__( self : str , UpperCAmelCase_ : Optional[Union[List[List[int]], np.ndarray]] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , **UpperCAmelCase_ : List[Any] , ):
super().__init__(**UpperCAmelCase_)
UpperCamelCase__ : Dict = size if size is not None else {'height': 256, 'width': 256}
UpperCamelCase__ : int = get_size_dict(UpperCAmelCase_)
UpperCamelCase__ : Tuple = np.array(UpperCAmelCase_) if clusters is not None else None
UpperCamelCase__ : Dict = do_resize
UpperCamelCase__ : Union[str, Any] = size
UpperCamelCase__ : Optional[Any] = resample
UpperCamelCase__ : Union[str, Any] = do_normalize
UpperCamelCase__ : Tuple = do_color_quantize
def __UpperCamelCase ( self : str , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Tuple , ):
UpperCamelCase__ : Optional[Any] = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(F'Size dictionary must contain both height and width keys. Got {size.keys()}')
return resize(
UpperCAmelCase_ , size=(size['height'], size['width']) , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , ):
        UpperCamelCase__ : Optional[int] = rescale(image=UpperCAmelCase_ , scale=1 / 127.5 , data_format=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = image - 1
return image
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[List[List[int]], np.ndarray]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **UpperCAmelCase_ : List[str] , ):
UpperCamelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ : str = size if size is not None else self.size
UpperCamelCase__ : Dict = get_size_dict(UpperCAmelCase_)
UpperCamelCase__ : int = resample if resample is not None else self.resample
UpperCamelCase__ : str = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ : str = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
UpperCamelCase__ : Union[str, Any] = clusters if clusters is not None else self.clusters
UpperCamelCase__ : str = np.array(UpperCAmelCase_)
UpperCamelCase__ : List[str] = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.')
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.')
# All transformations expect numpy arrays.
UpperCamelCase__ : Dict = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
UpperCamelCase__ : Tuple = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_) for image in images]
if do_normalize:
UpperCamelCase__ : Union[str, Any] = [self.normalize(image=UpperCAmelCase_) for image in images]
if do_color_quantize:
UpperCamelCase__ : Tuple = [to_channel_dimension_format(UpperCAmelCase_ , ChannelDimension.LAST) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
UpperCamelCase__ : Optional[Any] = np.array(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = color_quantize(UpperCAmelCase_ , UpperCAmelCase_).reshape(images.shape[:-1])
# flatten to (batch_size, height*width)
UpperCamelCase__ : Optional[int] = images.shape[0]
UpperCamelCase__ : int = images.reshape(UpperCAmelCase_ , -1)
# We need to convert back to a list of images to keep consistent behaviour across processors.
UpperCamelCase__ : Union[str, Any] = list(UpperCAmelCase_)
else:
UpperCamelCase__ : int = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
UpperCamelCase__ : Tuple = {'input_ids': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
| 6 |
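A toy run of the color-quantization helpers defined at the top of the snippet. The 4-color palette is made up for illustration; an ImageGPT-style processor uses a much larger learned palette:
import numpy as np
clusters = np.array([[0, 0, 0], [255, 0, 0], [0, 255, 0], [0, 0, 255]], dtype=np.float32)
pixels = (np.random.rand(8, 8, 3) * 255).astype(np.float32)
ids = color_quantize(pixels, clusters)  # helper defined above
print(ids.shape)  # (64,) -- one palette index per pixel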
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)
class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma')
            if not isinstance(self.gamma, (float, int)):
                raise ValueError('gamma must be float or int')
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f'Unknown kernel: {kernel}'
            raise ValueError(msg)
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)
    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)
        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)
        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]
        ).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 1 |
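A tiny smoke test of the classifier above on two linearly separable points; labels must be +1/-1 for the dual formulation to make sense:
import numpy as np
xs = [np.array([1.0, 1.0]), np.array([-1.0, -1.0])]
ys = np.array([1, -1])
svc = SVC(kernel="linear")
svc.fit(xs, ys)
print(svc.predict(np.array([2.0, 2.0])))  # expected: 1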
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file) -> None:
    with open(input_json_file, encoding='utf-8') as f:
        results = json.load(f)
    output_md = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split('/')[-1]
        output_md.append(f'### Benchmark: {benchmark_file_name}')
        title = '| metric |'
        lines = '|--------|'
        value = '| new / old (diff) |'
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['new']
            old_val = metric_vals.get('old', None)
            dif_val = metric_vals.get('diff', None)
            val_str = f' {new_val:f}' if isinstance(new_val, (int, float)) else 'None'
            if old_val is not None:
                val_str += f' / {old_val:f}' if isinstance(old_val, (int, float)) else 'None'
            if dif_val is not None:
                val_str += f' ({dif_val:f})' if isinstance(dif_val, (int, float)) else 'None'
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, ' ']
    output_md.append('</details>')
    with open(output_md_file, 'w', encoding='utf-8') as f:
        f.writelines('\n'.join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
| 6 |
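The script above expects a JSON file shaped like the sketch below and renders it as a collapsible Markdown report; the file and module names are illustrative:
# benchmark_results.json:
#   {"benchmarks/load.json": {"load_time": {"new": 1.2, "old": 1.5, "diff": -0.3}}}
# Equivalent CLI call: python format_benchmark_results.py benchmark_results.json report.md
from format_benchmark_results import format_json_to_md  # hypothetical module name
format_json_to_md("benchmark_results.json", "report.md")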
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model', 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model', 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed', 'patch_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj', 'projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv', 'head')
    if "scratch" in name:
        name = name.replace('scratch', 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn', 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn', 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn', 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn', 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx-4)}')
    if "out_conv" in name:
        name = name.replace('out_conv', 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1', 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2', 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1', 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2', 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained', 'dpt')
    if "bn" in name:
        name = name.replace('bn', 'batch_norm')
    if "head" in name:
        name = name.replace('head', 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm', 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer', 'auxiliary_head.head')
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors='pt')
    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model to hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add model',
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add image processor',
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 6 | 1 |
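An example invocation of the conversion script above, using the default checkpoint URL from its argument parser; the script file name is assumed:
# python convert_dpt_to_pytorch.py \
#     --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#     --pytorch_dump_folder_path ./dpt-large \
#     --model_name dpt-large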
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 6 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Any=0.6 , UpperCAmelCase_ : Dict=None , ):
UpperCamelCase__ : Tuple = parent
UpperCamelCase__ : List[str] = batch_size
UpperCamelCase__ : Optional[Any] = image_size
UpperCamelCase__ : Optional[Any] = patch_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : Union[str, Any] = is_training
UpperCamelCase__ : int = use_labels
UpperCamelCase__ : Optional[int] = hidden_size
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : str = intermediate_size
UpperCamelCase__ : Union[str, Any] = hidden_act
UpperCamelCase__ : Optional[int] = hidden_dropout_prob
UpperCamelCase__ : Tuple = attention_probs_dropout_prob
UpperCamelCase__ : Any = type_sequence_label_size
UpperCamelCase__ : int = initializer_range
UpperCamelCase__ : Optional[int] = mask_ratio
UpperCamelCase__ : int = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ : str = (image_size // patch_size) ** 2
UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any]):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Dict = ViTMAEModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Dict = model(UpperCAmelCase_)
UpperCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = config_and_inputs
UpperCamelCase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_lowerCamelCase = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = ViTMAEModelTester(self)
UpperCamelCase__ : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def __UpperCamelCase ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def __UpperCamelCase ( self : Tuple):
pass
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCamelCase__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__, UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(UpperCAmelCase_)
UpperCamelCase__ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Any = [*signature.parameters.keys()]
UpperCamelCase__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]):
# make masks reproducible
np.random.seed(2)
UpperCamelCase__ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
UpperCamelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
UpperCamelCase__ : Optional[Any] = torch.from_numpy(UpperCAmelCase_)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ : List[str] = pt_noise
super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__, UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
UpperCamelCase__ : Dict = outputs[0].cpu().numpy()
UpperCamelCase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = model_class.from_pretrained(UpperCAmelCase_)
model.to(UpperCAmelCase_)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
# Make sure we don't have nans
UpperCamelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCamelCase__ : Any = 0
UpperCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(UpperCAmelCase_ , 1e-5)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Optional[int]):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : Optional[int]):
pass
@slow
def __UpperCamelCase ( self : Optional[Any]):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : int):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
def __UpperCamelCase ( self : str):
# make random mask reproducible across the PT and TF model
np.random.seed(2)
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.default_image_processor
UpperCamelCase__ : Dict = prepare_img()
UpperCamelCase__ : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ : Union[str, Any] = ViTMAEConfig()
UpperCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
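        # for facebook/vit-mae-base: (224 // 16) ** 2 = 196 patches, and the decoder predicts 16 * 16 * 3 = 768 pixel values per patch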
UpperCamelCase__ : Any = np.random.uniform(size=(1, num_patches))
# forward pass
with torch.no_grad():
UpperCamelCase__ : Dict = model(**UpperCAmelCase_ , noise=torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_))
# verify the logits
UpperCamelCase__ : Tuple = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Any = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase_) , atol=1e-4))
| 6 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = (IPNDMScheduler,)
_lowerCamelCase = (('''num_inference_steps''', 50),)
def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : Union[str, Any]):
UpperCamelCase__ : str = {'num_train_timesteps': 1_000}
config.update(**UpperCAmelCase_)
return config
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any]=0 , **UpperCAmelCase_ : str):
UpperCamelCase__ : Dict = dict(self.forward_default_kwargs)
UpperCamelCase__ : Optional[int] = kwargs.pop('num_inference_steps' , UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.dummy_sample
UpperCamelCase__ : Union[str, Any] = 0.1 * sample
UpperCamelCase__ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ : Tuple = self.get_scheduler_config(**UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = scheduler_class(**UpperCAmelCase_)
scheduler.set_timesteps(UpperCAmelCase_)
# copy over dummy past residuals
UpperCamelCase__ : List[Any] = dummy_past_residuals[:]
if time_step is None:
UpperCamelCase__ : Tuple = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = scheduler_class.from_pretrained(UpperCAmelCase_)
new_scheduler.set_timesteps(UpperCAmelCase_)
# copy over dummy past residuals
UpperCamelCase__ : Optional[int] = dummy_past_residuals[:]
UpperCamelCase__ : Union[str, Any] = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_).prev_sample
UpperCamelCase__ : str = new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
UpperCamelCase__ : Optional[int] = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_).prev_sample
UpperCamelCase__ : Tuple = new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self : List[str]):
pass
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : List[str]=0 , **UpperCAmelCase_ : Union[str, Any]):
UpperCamelCase__ : Optional[Any] = dict(self.forward_default_kwargs)
UpperCamelCase__ : List[str] = kwargs.pop('num_inference_steps' , UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self.dummy_sample
UpperCamelCase__ : str = 0.1 * sample
UpperCamelCase__ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ : List[str] = self.get_scheduler_config()
UpperCamelCase__ : Tuple = scheduler_class(**UpperCAmelCase_)
scheduler.set_timesteps(UpperCAmelCase_)
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase__ : int = dummy_past_residuals[:]
if time_step is None:
UpperCamelCase__ : List[Any] = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase_)
UpperCamelCase__ : int = scheduler_class.from_pretrained(UpperCAmelCase_)
                new_scheduler.set_timesteps(UpperCAmelCase_)
                # copy over dummy past residuals (must be after setting timesteps)
                UpperCamelCase__ : Tuple = dummy_past_residuals[:]
UpperCamelCase__ : Optional[Any] = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_).prev_sample
UpperCamelCase__ : Dict = new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
UpperCamelCase__ : int = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_).prev_sample
UpperCamelCase__ : List[str] = new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self : List[Any] , **UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : int = self.scheduler_classes[0]
UpperCamelCase__ : Tuple = self.get_scheduler_config(**UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = scheduler_class(**UpperCAmelCase_)
UpperCamelCase__ : List[str] = 10
UpperCamelCase__ : List[Any] = self.dummy_model()
UpperCamelCase__ : Optional[int] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase_)
for i, t in enumerate(scheduler.timesteps):
UpperCamelCase__ : str = model(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : int = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_).prev_sample
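        # second pass over the same timesteps: the scheduler keeps a running list of past residuals, so this exercises its warmed-up multistep state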
for i, t in enumerate(scheduler.timesteps):
UpperCamelCase__ : Any = model(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_).prev_sample
return sample
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : Optional[int] = dict(self.forward_default_kwargs)
UpperCamelCase__ : List[Any] = kwargs.pop('num_inference_steps' , UpperCAmelCase_)
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ : Any = self.get_scheduler_config()
UpperCamelCase__ : Optional[Any] = scheduler_class(**UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.dummy_sample
UpperCamelCase__ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase_ , 'set_timesteps'):
scheduler.set_timesteps(UpperCAmelCase_)
elif num_inference_steps is not None and not hasattr(UpperCAmelCase_ , 'set_timesteps'):
UpperCamelCase__ : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase__ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCamelCase__ : Union[str, Any] = dummy_past_residuals[:]
UpperCamelCase__ : Dict = scheduler.timesteps[5]
UpperCamelCase__ : List[str] = scheduler.timesteps[6]
UpperCamelCase__ : Optional[Any] = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_).prev_sample
UpperCamelCase__ : Optional[Any] = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
UpperCamelCase__ : int = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_).prev_sample
UpperCamelCase__ : List[Any] = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __UpperCamelCase ( self : str):
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_ , time_step=UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=UpperCAmelCase_ , time_step=UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : int = self.full_loop()
UpperCamelCase__ : Optional[Any] = torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_mean.item() - 2_540_529) < 10
| 6 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=__lowerCamelCase ):
_lowerCamelCase = ['''torch''', '''scipy''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int):
requires_backends(self , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
requires_backends(cls , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
requires_backends(cls , ['torch', 'scipy'])
| 6 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'microsoft/table-transformer-detection': (
        'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''table-transformer'''
_lowerCamelCase = ['''past_key_values''']
_lowerCamelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : int , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : List[str]=100 , UpperCAmelCase_ : Tuple=6 , UpperCAmelCase_ : List[str]=2_048 , UpperCAmelCase_ : str=8 , UpperCAmelCase_ : Optional[int]=6 , UpperCAmelCase_ : Tuple=2_048 , UpperCAmelCase_ : Optional[int]=8 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : str="relu" , UpperCAmelCase_ : Dict=256 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : Dict=1.0 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : int="sine" , UpperCAmelCase_ : Dict="resnet50" , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=False , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Optional[int]=5 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : str=5 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Tuple=0.1 , **UpperCAmelCase_ : Tuple , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
UpperCamelCase__ : Tuple = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : Optional[Any] = backbone_config.get('model_type')
UpperCamelCase__ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase__ : Union[str, Any] = config_class.from_dict(UpperCAmelCase_)
# set timm attributes to None
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = None, None, None
UpperCamelCase__ : Any = use_timm_backbone
UpperCamelCase__ : str = backbone_config
UpperCamelCase__ : Dict = num_channels
UpperCamelCase__ : Optional[Any] = num_queries
UpperCamelCase__ : List[str] = d_model
UpperCamelCase__ : List[Any] = encoder_ffn_dim
UpperCamelCase__ : str = encoder_layers
UpperCamelCase__ : Dict = encoder_attention_heads
UpperCamelCase__ : str = decoder_ffn_dim
UpperCamelCase__ : Any = decoder_layers
UpperCamelCase__ : Optional[Any] = decoder_attention_heads
UpperCamelCase__ : Optional[int] = dropout
UpperCamelCase__ : Tuple = attention_dropout
UpperCamelCase__ : Tuple = activation_dropout
UpperCamelCase__ : Dict = activation_function
UpperCamelCase__ : int = init_std
UpperCamelCase__ : int = init_xavier_std
UpperCamelCase__ : Optional[int] = encoder_layerdrop
UpperCamelCase__ : List[Any] = decoder_layerdrop
UpperCamelCase__ : Any = encoder_layers
UpperCamelCase__ : Union[str, Any] = auxiliary_loss
UpperCamelCase__ : Optional[int] = position_embedding_type
UpperCamelCase__ : Any = backbone
UpperCamelCase__ : Optional[int] = use_pretrained_backbone
UpperCamelCase__ : Tuple = dilation
# Hungarian matcher
UpperCamelCase__ : List[Any] = class_cost
UpperCamelCase__ : Any = bbox_cost
UpperCamelCase__ : str = giou_cost
# Loss coefficients
UpperCamelCase__ : List[Any] = mask_loss_coefficient
UpperCamelCase__ : Optional[int] = dice_loss_coefficient
UpperCamelCase__ : str = bbox_loss_coefficient
UpperCamelCase__ : Union[str, Any] = giou_loss_coefficient
UpperCamelCase__ : Any = eos_coefficient
super().__init__(is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Any):
return self.encoder_attention_heads
@property
def __UpperCamelCase ( self : List[Any]):
return self.d_model
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self : List[Any]):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def __UpperCamelCase ( self : Dict):
return 1e-5
@property
def __UpperCamelCase ( self : List[Any]):
return 12
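# The ONNX config above exposes dynamic batch/channel/spatial axes for `pixel_values`
# and a dynamic batch axis for `pixel_mask`, validates with an absolute tolerance of
# 1e-5, and targets opset 12.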
| 6 |
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : str = "" , UpperCAmelCase_ : bool = False):
# Mapping from the first character of the prefix of the node
UpperCamelCase__ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
UpperCamelCase__ : List[Any] = is_leaf
UpperCamelCase__ : Optional[Any] = prefix
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = 0
for q, w in zip(self.prefix , UpperCAmelCase_):
if q != w:
break
x += 1
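        # e.g. with node prefix "band" and word "banana" this returns ("ban", "d", "ana")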
return self.prefix[:x], self.prefix[x:], word[x:]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : list[str]):
for word in words:
self.insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : str):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
UpperCamelCase__ : Optional[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
UpperCamelCase__ : Optional[Any] = RadixNode(prefix=UpperCAmelCase_ , is_leaf=UpperCAmelCase_)
else:
UpperCamelCase__ : int = self.nodes[word[0]]
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[Any] = incoming_node.match(
UpperCAmelCase_)
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
            # Case 4: The node prefix extends beyond the matching part
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
UpperCamelCase__ : Tuple = remaining_prefix
UpperCamelCase__ : str = self.nodes[matching_string[0]]
UpperCamelCase__ : Optional[Any] = RadixNode(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : str = aux_node
if remaining_word == "":
UpperCamelCase__ : int = True
else:
self.nodes[matching_string[0]].insert(UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = incoming_node.match(
UpperCAmelCase_)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(UpperCAmelCase_)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str):
UpperCamelCase__ : Optional[int] = self.nodes.get(word[0] , UpperCAmelCase_)
if not incoming_node:
return False
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = incoming_node.match(
UpperCAmelCase_)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(UpperCAmelCase_)
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
                    # We delete the node if no edges leave it
if len(incoming_node.nodes) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes) == 1 and not self.is_leaf:
UpperCamelCase__ : List[str] = list(self.nodes.values())[0]
UpperCamelCase__ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
UpperCamelCase__ : Tuple = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes) > 1:
UpperCamelCase__ : str = False
# If there is 1 edge, we merge it with its child
else:
UpperCamelCase__ : List[Any] = list(incoming_node.nodes.values())[0]
UpperCamelCase__ : Optional[Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
UpperCamelCase__ : Union[str, Any] = merging_node.nodes
return True
def __UpperCamelCase ( self : str , UpperCAmelCase_ : int = 0):
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '')
for value in self.nodes.values():
value.print_tree(height + 1)
def __UpperCAmelCase ( ) -> bool:
UpperCamelCase__ : Union[str, Any] = 'banana bananas bandana band apple all beast'.split()
UpperCamelCase__ : List[Any] = RadixNode()
root.insert_many(lowerCamelCase_)
    assert all(root.find(word) for word in words)
assert not root.find('bandanas')
assert not root.find('apps')
root.delete('all')
assert not root.find('all')
root.delete('banana')
assert not root.find('banana')
assert root.find('bananas')
return True
def __UpperCAmelCase ( ) -> None:
assert test_trie()
def __UpperCAmelCase ( ) -> None:
UpperCamelCase__ : List[Any] = RadixNode()
UpperCamelCase__ : List[str] = 'banana bananas bandanas bandana band apple all beast'.split()
root.insert_many(lowerCamelCase_)
print('Words:' , lowerCamelCase_)
print('Tree:')
root.print_tree()
if __name__ == "__main__":
main()
| 6 | 1 |
'''simple docstring'''
import math
import sys
def __UpperCAmelCase ( lowerCamelCase_) -> int:
if number != int(lowerCamelCase_):
raise ValueError('the value of input must be a natural number')
if number < 0:
raise ValueError('the value of input must not be a negative number')
if number == 0:
return 1
UpperCamelCase__ : Optional[int] = [-1] * (number + 1)
UpperCamelCase__ : Any = 0
for i in range(1 , number + 1):
UpperCamelCase__ : Union[str, Any] = sys.maxsize
UpperCamelCase__ : List[str] = int(math.sqrt(lowerCamelCase_))
for j in range(1 , root + 1):
UpperCamelCase__ : str = 1 + answers[i - (j**2)]
UpperCamelCase__ : int = min(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : List[Any] = answer
return answers[number]
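# Example: the minimum number of perfect squares summing to 12 is 3 (4 + 4 + 4),
# and for 13 it is 2 (4 + 9).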
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase__ = False
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : int):
return 12
@property
def __UpperCamelCase ( self : Tuple):
return 12
@property
def __UpperCamelCase ( self : Dict):
return 32
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self : List[str]):
torch.manual_seed(0)
UpperCamelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Dict = 12
UpperCamelCase__ : Union[str, Any] = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_)
return model
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : List[str] = self.dummy_vqvae
UpperCamelCase__ : List[str] = self.dummy_text_encoder
UpperCamelCase__ : Optional[int] = self.dummy_tokenizer
UpperCamelCase__ : List[str] = self.dummy_transformer
UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_)
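        # assemble the pipeline from the tiny dummy components defined above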
UpperCamelCase__ : int = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[int] = 'cpu'
UpperCamelCase__ : str = self.dummy_vqvae
UpperCamelCase__ : Any = self.dummy_text_encoder
UpperCamelCase__ : List[Any] = self.dummy_tokenizer
UpperCamelCase__ : Dict = self.dummy_transformer
UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
UpperCamelCase__ : str = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : str = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : int = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[Any] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_)
pipeline.set_progress_bar_config(disable=UpperCAmelCase_)
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : int = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , )
UpperCamelCase__ : int = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image).max() < 2.0
| 6 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class __lowercase (__lowerCamelCase ):
def __init__( self : List[Any] , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : int = data
def __iter__( self : Optional[Any]):
for element in self.data:
yield element
def __UpperCAmelCase ( lowerCamelCase_=True) -> List[str]:
UpperCamelCase__ : List[str] = Accelerator(even_batches=lowerCamelCase_)
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
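# Note: with even_batches=True (Accelerate's default) the batch sampler pads the last
# batches so every process sees the same number of batches; with even_batches=False the
# final batch may be smaller or missing on some processes.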
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False) -> Any:
if iterable:
UpperCamelCase__ : Tuple = DummyIterableDataset(torch.as_tensor(range(lowerCamelCase_)))
else:
UpperCamelCase__ : List[str] = TensorDataset(torch.as_tensor(range(lowerCamelCase_)))
UpperCamelCase__ : Any = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_)
UpperCamelCase__ : List[str] = accelerator.prepare(lowerCamelCase_)
return dl
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> int:
UpperCamelCase__ : Optional[int] = create_dataloader(accelerator=lowerCamelCase_ , dataset_size=lowerCamelCase_ , batch_size=lowerCamelCase_)
UpperCamelCase__ : Any = [len(batch[0]) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def __UpperCAmelCase ( ) -> str:
UpperCamelCase__ : Any = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
lowerCamelCase_ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
lowerCamelCase_ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def __UpperCAmelCase ( ) -> Tuple:
UpperCamelCase__ : int = create_accelerator(even_batches=lowerCamelCase_)
verify_dataloader_batch_sizes(
lowerCamelCase_ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
lowerCamelCase_ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def __UpperCAmelCase ( ) -> str:
UpperCamelCase__ : List[str] = create_accelerator(even_batches=lowerCamelCase_)
UpperCamelCase__ : Dict = torch.nn.Linear(1 , 1)
UpperCamelCase__ : Union[str, Any] = accelerator.prepare(lowerCamelCase_)
UpperCamelCase__ : List[str] = create_dataloader(lowerCamelCase_ , dataset_size=3 , batch_size=1)
UpperCamelCase__ : Optional[Any] = []
with accelerator.join_uneven_inputs([ddp_model]):
for batch_idx, batch in enumerate(lowerCamelCase_):
UpperCamelCase__ : List[Any] = ddp_model(batch[0].float())
UpperCamelCase__ : Any = output.sum()
loss.backward()
            batch_idxs.append(batch_idx)
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
with warnings.catch_warnings(record=lowerCamelCase_) as w:
with accelerator.join_uneven_inputs([Mock()]):
pass
assert issubclass(w[-1].category , lowerCamelCase_)
assert "only supported for multi-GPU" in str(w[-1].message)
def __UpperCAmelCase ( ) -> List[Any]:
UpperCamelCase__ : List[str] = True
UpperCamelCase__ : Optional[Any] = False
UpperCamelCase__ : str = create_accelerator(even_batches=lowerCamelCase_)
UpperCamelCase__ : Optional[Any] = torch.nn.Linear(1 , 1)
UpperCamelCase__ : List[Any] = accelerator.prepare(lowerCamelCase_)
UpperCamelCase__ : int = create_dataloader(lowerCamelCase_ , dataset_size=3 , batch_size=1)
UpperCamelCase__ : Optional[Any] = create_dataloader(lowerCamelCase_ , dataset_size=3 , batch_size=1)
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCamelCase_):
UpperCamelCase__ : Dict = train_dl.batch_sampler.even_batches
UpperCamelCase__ : Union[str, Any] = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def __UpperCAmelCase ( ) -> List[str]:
UpperCamelCase__ : List[str] = True
UpperCamelCase__ : Optional[Any] = False
UpperCamelCase__ : Dict = create_accelerator(even_batches=lowerCamelCase_)
UpperCamelCase__ : Dict = torch.nn.Linear(1 , 1)
UpperCamelCase__ : List[str] = accelerator.prepare(lowerCamelCase_)
create_dataloader(lowerCamelCase_ , dataset_size=3 , batch_size=1 , iterable=lowerCamelCase_)
UpperCamelCase__ : Optional[Any] = create_dataloader(lowerCamelCase_ , dataset_size=3 , batch_size=1)
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCamelCase_):
UpperCamelCase__ : Union[str, Any] = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def __UpperCAmelCase ( ) -> Union[str, Any]:
UpperCamelCase__ : Optional[Any] = create_accelerator()
UpperCamelCase__ : Tuple = torch.nn.Linear(1 , 1)
UpperCamelCase__ : Optional[Any] = accelerator.prepare(lowerCamelCase_)
create_dataloader(lowerCamelCase_ , dataset_size=3 , batch_size=1 , iterable=lowerCamelCase_)
with warnings.catch_warnings(record=lowerCamelCase_) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCamelCase_):
pass
assert issubclass(w[-1].category , lowerCamelCase_)
assert "only supported for map-style datasets" in str(w[-1].message)
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : int = create_accelerator()
accelerator.print('Test that even_batches variable ensures uniform batches across processes')
test_default_ensures_even_batch_sizes()
accelerator.print('Run tests with even_batches disabled')
test_can_disable_even_batches()
accelerator.print('Test joining uneven inputs')
test_can_join_uneven_inputs()
accelerator.print('Test overriding even_batches when joining uneven inputs')
test_join_can_override_even_batches()
accelerator.print('Test overriding even_batches for mixed dataloader types')
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders')
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('Test join with non DDP distributed raises warning')
UpperCamelCase__ : Optional[Any] = accelerator.state.distributed_type
UpperCamelCase__ : List[Any] = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(lowerCamelCase_)
UpperCamelCase__ : Union[str, Any] = original_state
if __name__ == "__main__":
main()
| 6 |
'''simple docstring'''
import numpy as np
from PIL import Image
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : List[Any] = np.array(lowerCamelCase_)
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix')
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : int = 0
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : str = 0
# compute the shape of the output matrix
UpperCamelCase__ : int = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
UpperCamelCase__ : Dict = np.zeros((maxpool_shape, maxpool_shape))
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
UpperCamelCase__ : Dict = np.max(arr[i : i + size, j : j + size])
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[int] = 0
return updated_arr
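# Example: maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
# returns [[6., 8.], [14., 16.]] -- the maximum of each non-overlapping 2x2 window.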
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.ndarray:
UpperCamelCase__ : Tuple = np.array(lowerCamelCase_)
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix')
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : int = 0
UpperCamelCase__ : List[str] = 0
UpperCamelCase__ : List[Any] = 0
# compute the shape of the output matrix
UpperCamelCase__ : str = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
UpperCamelCase__ : Union[str, Any] = np.zeros((avgpool_shape, avgpool_shape))
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
UpperCamelCase__ : List[Any] = int(np.average(arr[i : i + size, j : j + size]))
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCamelCase__ : Union[str, Any] = 0
UpperCamelCase__ : Optional[Any] = 0
return updated_arr
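# Example: avgpooling on the same 4x4 input with size=2 and stride=2 yields
# [[3., 5.], [11., 13.]] -- the window averages 3.5, 5.5, 11.5 and 13.5 are
# truncated towards zero by the int() cast.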
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
lowerCAmelCase__ = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 6 | 1 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __UpperCAmelCase ( lowerCamelCase_) -> int:
UpperCamelCase__ : Optional[int] = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> List[Any]:
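    # build a bias-free Linear layer that shares the embedding's weight matrix (used as the LM head)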
UpperCamelCase__, UpperCamelCase__ : int = emb.weight.shape
UpperCamelCase__ : Optional[Any] = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_)
UpperCamelCase__ : int = emb.weight.data
return lin_layer
def __UpperCAmelCase ( lowerCamelCase_) -> int:
UpperCamelCase__ : str = torch.load(lowerCamelCase_ , map_location='cpu')
UpperCamelCase__ : Union[str, Any] = mam_aaa['args'] or mam_aaa['cfg']['model']
UpperCamelCase__ : Dict = mam_aaa['model']
remove_ignore_keys_(lowerCamelCase_)
UpperCamelCase__ : str = state_dict['encoder.embed_tokens.weight'].shape[0]
UpperCamelCase__ : Tuple = MaMaaaConfig(
vocab_size=lowerCamelCase_ , max_position_embeddings=1_024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
UpperCamelCase__ : List[Any] = state_dict['decoder.embed_tokens.weight']
UpperCamelCase__ : Tuple = MaMaaaForConditionalGeneration(lowerCamelCase_)
model.model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_)
UpperCamelCase__ : int = make_linear_from_emb(model.model.shared)
return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
lowerCAmelCase__ = parser.parse_args()
    lowerCAmelCase__ = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 6 |
'''simple docstring'''
from __future__ import annotations
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]):
UpperCamelCase__ : int = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.')
if len(UpperCAmelCase_) != 0:
UpperCamelCase__ : str = len(rows[0])
if cols == 0:
raise error
for row in rows:
if len(UpperCAmelCase_) != cols:
raise error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise error
UpperCamelCase__ : Optional[int] = rows
else:
UpperCamelCase__ : Optional[Any] = []
def __UpperCamelCase ( self : Union[str, Any]):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
@property
def __UpperCamelCase ( self : Dict):
return len(self.rows)
@property
def __UpperCamelCase ( self : Tuple):
return len(self.rows[0])
@property
def __UpperCamelCase ( self : List[Any]):
return (self.num_rows, self.num_columns)
@property
def __UpperCamelCase ( self : Any):
return self.order[0] == self.order[1]
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[int] = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
for row_num in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0])
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]))
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns))
def __UpperCamelCase ( self : str):
return bool(self.determinant())
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
UpperCamelCase__ : Optional[Any] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns)
if other_column != column
]
for other_row in range(self.num_rows)
if other_row != row
]
return Matrix(UpperCAmelCase_).determinant()
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
if (row + column) % 2 == 0:
return self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
return -1 * self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
return Matrix(
[
[self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) for column in range(self.num_columns)]
for row in range(self.num_rows)
])
def __UpperCamelCase ( self : Optional[int]):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns)
]
for row in range(self.minors().num_rows)
])
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = [
[self.cofactors().rows[column][row] for column in range(self.num_columns)]
for row in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse')
return self.adjugate() * (1 / determinant)
def __repr__( self : Any):
return str(self.rows)
def __str__( self : List[Any]):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0])) + "]]"
return (
"["
+ "\n ".join(
[
                    '[' + '. '.join([str(value) for value in row]) + '.]'
for row in self.rows
])
+ "]"
)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : List[str] = TypeError('Row must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix')
if position is None:
self.rows.append(UpperCAmelCase_)
else:
UpperCamelCase__ : Tuple = self.rows[0:position] + [row] + self.rows[position:]
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : int = TypeError(
'Column must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in column:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix')
if position is None:
UpperCamelCase__ : Optional[int] = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
else:
UpperCamelCase__ : str = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows)
]
def __eq__( self : List[Any] , UpperCAmelCase_ : object):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Any , UpperCAmelCase_ : object):
return not self == other
def __neg__( self : Union[str, Any]):
return self * -1
def __add__( self : Optional[int] , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __sub__( self : Tuple , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __mul__( self : Any , UpperCAmelCase_ : Matrix | int | float):
if isinstance(UpperCAmelCase_ , (int, float)):
return Matrix(
[[int(element * other) for element in row] for row in self.rows])
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second')
return Matrix(
[
[Matrix.dot_product(UpperCAmelCase_ , UpperCAmelCase_) for column in other.columns()]
for row in self.rows
])
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix')
def __pow__( self : Dict , UpperCAmelCase_ : int):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise TypeError('A Matrix can only be raised to the power of an int')
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power')
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power')
UpperCamelCase__ : str = self
for _ in range(other - 1):
result *= self
return result
@classmethod
def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int]):
return sum(row[i] * column[i] for i in range(len(UpperCAmelCase_)))
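# Sanity example: for the matrix [[1, 2], [3, 4]] the determinant is -2 and the
# matrix squared is [[7, 10], [15, 22]].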
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 1 |
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
lowerCAmelCase__ = 'bert-base-cased'
lowerCAmelCase__ = 'fp16'
lowerCAmelCase__ = 'bf16'
lowerCAmelCase__ = [FPaa, BFaa]
@require_fsdp
@require_cuda
class __lowercase (__lowerCamelCase ):
def __UpperCamelCase ( self : Dict):
super().setUp()
UpperCamelCase__ : str = dict(
ACCELERATE_USE_FSDP='true' , MASTER_ADDR='localhost' , MASTER_PORT='10999' , RANK='0' , LOCAL_RANK='0' , WORLD_SIZE='1' , )
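        # these env vars emulate a minimal single-process distributed launch (via mockenv_context below)
        # so the FSDP plugin and Accelerator can be constructed without torchrun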
def __UpperCamelCase ( self : Dict):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(UpperCAmelCase_):
UpperCamelCase__ : Any = self.dist_env.copy()
UpperCamelCase__ : Optional[int] = F'{i + 1}'
UpperCamelCase__ : List[str] = strategy
with mockenv_context(**UpperCAmelCase_):
UpperCamelCase__ : Optional[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1))
def __UpperCamelCase ( self : Optional[int]):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(UpperCAmelCase_):
UpperCamelCase__ : Tuple = self.dist_env.copy()
UpperCamelCase__ : int = prefetch_policy
with mockenv_context(**UpperCAmelCase_):
UpperCamelCase__ : Optional[Any] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch)
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1))
def __UpperCamelCase ( self : Any):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(UpperCAmelCase_):
UpperCamelCase__ : List[str] = self.dist_env.copy()
UpperCamelCase__ : Dict = state_dict_type
with mockenv_context(**UpperCAmelCase_):
UpperCamelCase__ : Any = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1))
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only)
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : Union[str, Any] = AutoModel.from_pretrained(UpperCAmelCase_)
for policy in FSDP_AUTO_WRAP_POLICY:
UpperCamelCase__ : Optional[int] = self.dist_env.copy()
UpperCamelCase__ : List[str] = policy
if policy == "TRANSFORMER_BASED_WRAP":
UpperCamelCase__ : List[Any] = 'BertLayer'
elif policy == "SIZE_BASED_WRAP":
UpperCamelCase__ : int = '2000'
with mockenv_context(**UpperCAmelCase_):
UpperCamelCase__ : int = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(UpperCAmelCase_)
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy)
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)
UpperCamelCase__ : int = self.dist_env.copy()
UpperCamelCase__ : List[str] = 'TRANSFORMER_BASED_WRAP'
UpperCamelCase__ : List[str] = 'T5Layer'
with mockenv_context(**UpperCAmelCase_):
UpperCamelCase__ : Optional[Any] = FullyShardedDataParallelPlugin()
with self.assertRaises(UpperCAmelCase_) as cm:
fsdp_plugin.set_auto_wrap_policy(UpperCAmelCase_)
self.assertTrue('Could not find the transformer layer class to wrap in the model.' in str(cm.exception))
UpperCamelCase__ : Optional[Any] = self.dist_env.copy()
UpperCamelCase__ : int = 'SIZE_BASED_WRAP'
UpperCamelCase__ : Optional[int] = '0'
with mockenv_context(**UpperCAmelCase_):
UpperCamelCase__ : Tuple = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(UpperCAmelCase_)
self.assertIsNone(fsdp_plugin.auto_wrap_policy)
def __UpperCamelCase ( self : Any):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
UpperCamelCase__ : Union[str, Any] = self.dist_env.copy()
UpperCamelCase__ : Optional[int] = mp_dtype
with mockenv_context(**UpperCAmelCase_):
UpperCamelCase__ : int = Accelerator()
if mp_dtype == "fp16":
UpperCamelCase__ : Optional[Any] = torch.floataa
elif mp_dtype == "bf16":
UpperCamelCase__ : int = torch.bfloataa
UpperCamelCase__ : List[str] = MixedPrecision(param_dtype=UpperCAmelCase_ , reduce_dtype=UpperCAmelCase_ , buffer_dtype=UpperCAmelCase_)
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , UpperCAmelCase_)
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , UpperCAmelCase_))
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler)
AcceleratorState._reset_state(UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any]):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
UpperCamelCase__ : str = self.dist_env.copy()
UpperCamelCase__ : List[str] = str(UpperCAmelCase_).lower()
with mockenv_context(**UpperCAmelCase_):
UpperCamelCase__ : str = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=UpperCAmelCase_))
@require_fsdp
@require_multi_gpu
@slow
class __lowercase (__lowerCamelCase ):
def __UpperCamelCase ( self : Dict):
super().setUp()
UpperCamelCase__ : List[Any] = 0.82
UpperCamelCase__ : Optional[Any] = [
'fsdp_shard_grad_op_transformer_based_wrap',
'fsdp_full_shard_transformer_based_wrap',
]
UpperCamelCase__ : List[Any] = {
'multi_gpu_fp16': 3_200,
'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2_000,
'fsdp_full_shard_transformer_based_wrap_fp16': 1_900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
UpperCamelCase__ : Tuple = 160
UpperCamelCase__ : str = 160
UpperCamelCase__ : List[Any] = inspect.getfile(accelerate.test_utils)
UpperCamelCase__ : int = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps'])
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : str = os.path.join(self.test_scripts_folder , 'test_performance.py')
UpperCamelCase__ : Tuple = ['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp']
for config in self.performance_configs:
UpperCamelCase__ : int = cmd.copy()
for i, strategy in enumerate(UpperCAmelCase_):
if strategy.lower() in config:
cmd_config.append(F'--fsdp_sharding_strategy={i+1}')
break
if "fp32" in config:
cmd_config.append('--mixed_precision=no')
else:
cmd_config.append('--mixed_precision=fp16')
if "cpu_offload" in config:
cmd_config.append('--fsdp_offload_params=True')
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'--fsdp_auto_wrap_policy={policy}')
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer')
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000')
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
F'--performance_lower_bound={self.performance_lower_bound}',
])
with patch_environment(omp_num_threads=1):
execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy())
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Tuple = os.path.join(self.test_scripts_folder , 'test_checkpointing.py')
UpperCamelCase__ : int = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
'--use_fsdp',
'--mixed_precision=fp16',
'--fsdp_transformer_layer_cls_to_wrap=BertLayer',
]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):  # constant assumed imported earlier in this module
            cmd_config = cmd.copy()
cmd_config.append(F'--fsdp_sharding_strategy={i+1}')
if strategy != "FULL_SHARD":
continue
            state_dict_config_index = len(cmd_config)
for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(F'--fsdp_state_dict_type={state_dict_type}')
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
'--partial_train_epoch=1',
])
with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config , env=os.environ.copy())
                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir , 'epoch_0')
cmd_config.extend(
[
F'--resume_from_checkpoint={resume_from_checkpoint}',
])
with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config , env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder , 'test_peak_memory_usage.py')
        cmd = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
]
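        # Each spec toggles mixed precision, sharding strategy, CPU offload and auto-wrap policy, then asserts the peak-memory bound.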
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['--mixed_precision=fp16'])
else:
cmd_config.extend(['--mixed_precision=no'])
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['--use_fsdp'])
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):  # constant assumed imported earlier in this module
if strategy.lower() in spec:
cmd_config.append(F'--fsdp_sharding_strategy={i+1}')
break
if "cpu_offload" in spec:
cmd_config.append('--fsdp_offload_params=True')
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'--fsdp_auto_wrap_policy={policy}')
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer')
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000')
cmd_config.extend(
[
self.test_file_path,
F'--output_dir={self.tmpdir}',
F'--peak_memory_upper_bound={peak_mem_upper_bound}',
F'--n_train={self.n_train}',
F'--n_val={self.n_val}',
])
with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config , env=os.environ.copy())
| 6 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __lowercase :
    def get_dummy_components(self):
torch.manual_seed(0)
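        # Tiny text encoder/tokenizer and a two-block UNet keep these pipeline tests fast.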
UpperCamelCase__ : Dict = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : List[str] = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Dict):
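        # Super-resolution variant: same tiny components plus a separate image-noising scheduler.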
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : str = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[str] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Any):
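        # Round-trip: pre-compute prompt embeddings, null out optional components, save, reload, and compare outputs.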
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs['prompt']
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']
        if "image" in inputs:
            image = inputs['image']
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs['mask_image']
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs['original_image']
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)
# inputs with prompt converted to embeddings
        inputs = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image
# set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None)
        output = pipe(**inputs)[0]
with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
                    getattr(pipe_loaded , optional_component) is None , F'`{optional_component}` did not stay set to None after loading.' , )
            inputs = self.get_dummy_inputs(torch_device)
            generator = inputs['generator']
            num_inference_steps = inputs['num_inference_steps']
            output_type = inputs['output_type']
# inputs with prompt converted to embeddings
            inputs = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
            if image is not None:
                inputs['image'] = image
            if mask_image is not None:
                inputs['mask_image'] = mask_image
            if original_image is not None:
                inputs['original_image'] = original_image
            output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff , 1e-4)
def __UpperCamelCase ( self : Optional[int]):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
            inputs = self.get_dummy_inputs(torch_device)
            output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff , 1e-4)
| 6 | 1 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self , **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)
    def get_rust_tokenizer(self , **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts(self , tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)
def __UpperCamelCase ( self : Union[str, Any]):
        tokenizer = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=UpperCAmelCase_) , [0, 31_414, 232, 328, 2])
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=UpperCAmelCase_) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096')
        text = tokenizer.encode('sequence builders' , add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build' , add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders' , add_special_tokens=True , add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=True , add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()
        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8')[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char , space_encoding)
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char , space_encoding)
        tokenizer.add_special_tokens({'bos_token': '<s>'})
        encoded = tokenizer.encode(sequence , add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char , space_encoding)
        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask , lstrip=True , rstrip=False)})  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char , space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char , space_encoding)
def __UpperCamelCase ( self : List[str]):
pass
    def test_embeded_special_tokens(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids']) , sum(tokens_p['token_type_ids']))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']) , sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
                self.assertSequenceEqual(
                    tokens_r_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
    def test_change_add_prefix_space_and_trim_offsets_args(self):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2):
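            # The serialized pre-tokenizer/post-processor state must reflect both constructor flags.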
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets)
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state['add_prefix_space'] , add_prefix_space)
            self.assertEqual(post_processor_state['add_prefix_space'] , add_prefix_space)
            self.assertEqual(post_processor_state['trim_offsets'] , trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True)
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True)
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False)
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False)
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                text = F' {text}'
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True)
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False)
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False)
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)) , )
| 6 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
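# ElGamal keys: public (key_size, e_1, e_2, p) and private (key_size, d), where e_2 = (e_1**d mod p)**-1 mod p.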
lowerCAmelCase__ = 3
def primitive_root(p_val: int) -> int:
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3 , p_val)
        if pow(g , 2 , p_val) == 1:
            continue
        if pow(g , p_val , p_val) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3 , p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p) , p)
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str , key_size: int) -> None:
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print('\nWARNING:')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.')
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt' , 'w') as fo:
        fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt' , 'w') as fo:
        fo.write(f'{private_key[0]},{private_key[1]}')
def main() -> None:
    print('Making key files...')
    make_key_files('elgamal' , 2_048)
    print('Key files generation successful')
if __name__ == "__main__":
main()
| 6 | 1 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model(self , config , pixel_values , labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self , config , pixel_values , labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def __UpperCamelCase ( self : Tuple):
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self , tf_model , pt_model , pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict['noise'] = pt_noise
        super().check_pt_tf_models(tf_model , pt_model , pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict , model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff , 1e-5)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Optional[int]):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : Optional[int]):
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt').to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs , noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor(
            [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(torch_device) , atol=1e-4))
| 6 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
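# Keys listed here sit at the top level of the HF model rather than under the `unispeech.` prefix.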
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type , is_finetuned):
    for attribute in key.split('.'):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = 'lm_head'
        hf_pointer = getattr(hf_pointer , attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model , hf_model , is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*' , layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type , is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path , 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 42
            vocab_dict['<s>'] = 43
            with open(vocab_path , 'w' , encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict , vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model , hf_unispeech , is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 6 | 1 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
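# The helpers below wrap unittest.skipUnless so tests are skipped when a dependency, device or env flag is missing.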
def parse_flag_from_env(key , default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.')
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
return unittest.skip('Test was skipped')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> List[str]:
return unittest.skipUnless(_run_slow_tests , 'test is slow')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Union[str, Any]:
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[int]:
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> int:
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> str:
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
return unittest.skipUnless(is_tpu_available() , 'test requires TPU')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Union[str, Any]:
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[int]:
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> List[str]:
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[int]:
return unittest.skipUnless(is_torch_version('>=' , '1.12.0') , 'test requires torch version >= 1.12.0')(lowerCamelCase_)
def require_torch_min_version(test_case=None , version=None):
    if test_case is None:
        return partial(require_torch_min_version , version=version)
    return unittest.skipUnless(is_torch_version('>=' , version) , f'test requires torch version >= {version}')(test_case)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[int]:
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> int:
return unittest.skipUnless(is_wandb_available() , 'test requires wandb')(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml')(lowerCamelCase_)
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(lowerCamelCase_)
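# Base test cases: a shared scratch dir that is wiped between tests, plus accelerator-state and mock management.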
class TempDirTestCase(unittest.TestCase ):  # class name assumed from accelerate's testing utils
    clear_on_setup = True
    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()
@classmethod
    def tearDownClass(cls):
if os.path.exists(cls.tmpdir):
shutil.rmtree(cls.tmpdir)
    def setUp(self):
if self.clear_on_setup:
for path in Path(self.tmpdir).glob('**/*'):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCAmelCase_)
class AccelerateTestCase(unittest.TestCase ):  # class name assumed from accelerate's testing utils
    def tearDown(self):
        super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class MockingTestCase(unittest.TestCase ):  # class name assumed from accelerate's testing utils
    def add_mocks(self , mocks : Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks , (tuple, list)) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop)
def are_the_same_tensors(tensor):  # function name assumed from accelerate's testing utils
    state = AcceleratorState()
    tensors = tensor[None].clone().to(state.device)
    tensors = gather(tensors).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i] , tensor):
            return False
    return True
class _RunOutput:
    def __init__(self , returncode , stdout , stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False) -> _RunOutput:
    if echo:
        print('\nRunning: ' , ' '.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label , line , file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l: tee(l , out , sys.stdout , label='stdout:'))),
            asyncio.create_task(_read_stream(p.stderr , lambda l: tee(l , err , sys.stderr , label='stderr:'))),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err)
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo))
    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            f'The combined stderr from workers follows:\n{stderr}')
    return result
return result
class SubprocessCallException(Exception ):
    pass
def run_command(command , return_stdout=False):  # function name assumed from accelerate's testing utils
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output , 'decode'):
                output = output.decode('utf-8')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'Command `{" ".join(command)}` failed with the following error:\n\n{e.output.decode()}') from e
| 6 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[str]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Union[str, Any] = 1
UpperCamelCase__ : Union[str, Any] = 3
UpperCamelCase__ : Dict = (32, 32)
UpperCamelCase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCAmelCase_)
return image
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCamelCase ( self : str):
torch.manual_seed(0)
UpperCamelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[Any]):
def extract(*UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict):
class __lowercase :
def __init__( self : List[Any]):
UpperCamelCase__ : Optional[Any] = torch.ones([0])
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int):
self.pixel_values.to(UpperCAmelCase_)
return self
return Out()
return extract
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Any = self.dummy_cond_unet
UpperCamelCase__ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
UpperCamelCase__ : List[str] = self.dummy_vae
UpperCamelCase__ : str = self.dummy_text_encoder
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Tuple = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : int = self.dummy_cond_unet
UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.dummy_vae
UpperCamelCase__ : Optional[int] = self.dummy_text_encoder
UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Dict = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert isinstance(pipe.scheduler , UpperCAmelCase_)
assert pipe.safety_checker is None
UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Dict = self.dummy_cond_unet
UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Any = self.dummy_vae
UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder
UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# put models in fp16
UpperCamelCase__ : Any = unet.half()
UpperCamelCase__ : Tuple = vae.half()
UpperCamelCase__ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
UpperCamelCase__ : Any = 4_003_660_346
UpperCamelCase__ : Any = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : str = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : str = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity'
UpperCamelCase__ : Tuple = 2_734_971_755
UpperCamelCase__ : Tuple = 7
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : int = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : int = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : int = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
UpperCamelCase__ : Any = 1_044_355_234
UpperCamelCase__ : Optional[int] = 12
UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : List[str] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
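# Added, self-contained illustration (values are made up, not from the real
# pipelines): the tests above all follow one reproducibility idiom -- seed a
# CPU torch.Generator, run the pipeline, then compare a trailing 3x3 slice of
# the output image against reference values within a 1e-2 tolerance.
_demo_images = np.zeros((1, 64, 64, 3), dtype=np.float32)      # stand-in for output.images
_demo_slice = _demo_images[0, -3:, -3:, -1]                    # trailing 3x3 patch, last channel
_demo_expected = np.zeros(9, dtype=np.float32)                 # hypothetical reference slice
assert np.abs(_demo_slice.flatten() - _demo_expected).max() < 1e-2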
| 6 | 1 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __lowercase (__lowerCamelCase ):
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Tuple = SMALL_MODEL_IDENTIFIER
UpperCamelCase__ : Dict = 'pt'
UpperCamelCase__ : List[str] = 'tf'
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : int):
UpperCamelCase__ : str = AutoModel.from_pretrained(self.test_model)
model_pt.save_pretrained(UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : List[Any]):
UpperCamelCase__ : List[str] = TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCAmelCase_)
model_tf.save_pretrained(UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : List[str] = 'mock_framework'
# Framework provided - return whatever the user provides
UpperCamelCase__ : Union[str, Any] = FeaturesManager.determine_framework(self.test_model , UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCAmelCase_)
UpperCamelCase__ : int = FeaturesManager.determine_framework(UpperCAmelCase_ , UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = FeaturesManager.determine_framework(UpperCAmelCase_ , UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCAmelCase_)
UpperCamelCase__ : Any = FeaturesManager.determine_framework(UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , self.framework_pt)
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCAmelCase_)
UpperCamelCase__ : Any = FeaturesManager.determine_framework(UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , self.framework_tf)
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(UpperCAmelCase_):
UpperCamelCase__ : Tuple = FeaturesManager.determine_framework(UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int]):
        # TensorFlow not in environment -> use PyTorch
        UpperCamelCase__ : Dict = MagicMock(return_value=UpperCAmelCase_)
with patch('transformers.onnx.features.is_tf_available' , UpperCAmelCase_):
UpperCamelCase__ : Dict = FeaturesManager.determine_framework(self.test_model)
self.assertEqual(UpperCAmelCase_ , self.framework_pt)
# PyTorch not in environment -> use TensorFlow
UpperCamelCase__ : List[Any] = MagicMock(return_value=UpperCAmelCase_)
with patch('transformers.onnx.features.is_torch_available' , UpperCAmelCase_):
UpperCamelCase__ : Any = FeaturesManager.determine_framework(self.test_model)
self.assertEqual(UpperCAmelCase_ , self.framework_tf)
# Both in environment -> use PyTorch
UpperCamelCase__ : Dict = MagicMock(return_value=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = MagicMock(return_value=UpperCAmelCase_)
with patch('transformers.onnx.features.is_tf_available' , UpperCAmelCase_), patch(
'transformers.onnx.features.is_torch_available' , UpperCAmelCase_):
UpperCamelCase__ : int = FeaturesManager.determine_framework(self.test_model)
self.assertEqual(UpperCAmelCase_ , self.framework_pt)
# Both not in environment -> raise error
UpperCamelCase__ : Optional[Any] = MagicMock(return_value=UpperCAmelCase_)
UpperCamelCase__ : Tuple = MagicMock(return_value=UpperCAmelCase_)
with patch('transformers.onnx.features.is_tf_available' , UpperCAmelCase_), patch(
'transformers.onnx.features.is_torch_available' , UpperCAmelCase_):
with self.assertRaises(UpperCAmelCase_):
UpperCamelCase__ : List[str] = FeaturesManager.determine_framework(self.test_model)
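# Hedged usage sketch (added): `FeaturesManager.determine_framework` is the real
# transformers API exercised above; the checkpoint path below is hypothetical,
# so the example is left commented out.
# from transformers.onnx import FeaturesManager
# framework = FeaturesManager.determine_framework('path/to/local_checkpoint')
# print(framework)  # 'pt' for a PyTorch checkpoint, 'tf' for a TensorFlow one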
| 6 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowerCAmelCase__ = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
lowerCAmelCase__ = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __UpperCAmelCase ( ) -> Union[str, Any]:
UpperCamelCase__ : Optional[Any] = (
list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1))
)
UpperCamelCase__ : List[Any] = bs[:]
UpperCamelCase__ : Optional[int] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase_)
cs.append(2**8 + n)
n += 1
UpperCamelCase__ : Union[str, Any] = [chr(lowerCamelCase_) for n in cs]
return dict(zip(lowerCamelCase_ , lowerCamelCase_))
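# Added, self-contained illustration (not part of the original file): the helper
# above is the standard GPT-2 `bytes_to_unicode` table -- every byte 0..255 is
# mapped to a printable unicode character so byte-level BPE stays reversible.
_demo_byte_map = {b: chr(b) for b in range(ord('!') , ord('~') + 1)}  # printable ASCII maps to itself
assert _demo_byte_map[ord('A')] == 'A'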
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Any = set()
UpperCamelCase__ : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
UpperCamelCase__ : str = char
return pairs
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[Any] , ):
UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
UpperCamelCase__ : Any = json.load(UpperCAmelCase_)
UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ : Any = errors # how to handle errors in decoding
UpperCamelCase__ : Tuple = bytes_to_unicode()
UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle:
UpperCamelCase__ : List[Any] = merges_handle.read().split('\n')[1:-1]
UpperCamelCase__ : List[Any] = [tuple(merge.split()) for merge in bpe_merges]
UpperCamelCase__ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Dict = {}
UpperCamelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase__ : Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple):
return len(self.encoder)
def __UpperCamelCase ( self : Tuple):
return dict(self.encoder , **self.added_tokens_encoder)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]):
if token in self.cache:
return self.cache[token]
UpperCamelCase__ : Optional[int] = tuple(UpperCAmelCase_)
UpperCamelCase__ : int = get_pairs(UpperCAmelCase_)
if not pairs:
return token
while True:
UpperCamelCase__ : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCAmelCase_ , float('inf')))
if bigram not in self.bpe_ranks:
break
UpperCamelCase__, UpperCamelCase__ : Tuple = bigram
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Optional[int] = 0
while i < len(UpperCAmelCase_):
try:
UpperCamelCase__ : Tuple = word.index(UpperCAmelCase_ , UpperCAmelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCamelCase__ : Any = j
if word[i] == first and i < len(UpperCAmelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCamelCase__ : List[str] = tuple(UpperCAmelCase_)
UpperCamelCase__ : Dict = new_word
if len(UpperCAmelCase_) == 1:
break
else:
UpperCamelCase__ : Optional[int] = get_pairs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = word
return word
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any):
UpperCamelCase__ : Optional[Any] = []
for token in re.findall(self.pat , UpperCAmelCase_):
UpperCamelCase__ : Optional[int] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' '))
return bpe_tokens
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int]):
return self.decoder.get(UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int):
UpperCamelCase__ : int = ''.join(UpperCAmelCase_)
UpperCamelCase__ : Any = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
if not os.path.isdir(UpperCAmelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase__ : str = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n')
UpperCamelCase__ : str = 0
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!')
UpperCamelCase__ : List[Any] = token_index
writer.write(' '.join(UpperCAmelCase_) + '\n')
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_)) + [1]
return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1]
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : Any = [self.sep_token_id]
UpperCamelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_) > 0 and not text[0].isspace()):
UpperCamelCase__ : str = ' ' + text
return (text, kwargs)
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"):
UpperCamelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text)
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : int = self.encode(UpperCAmelCase_)
if len(UpperCAmelCase_) > self.model_max_length:
UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
return input_ids
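# Hedged usage sketch (added; the class above is BlenderbotTokenizer upstream and
# 'facebook/blenderbot-3B' is the real checkpoint id, but running this needs
# network access, so it is left commented out):
# tok = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')
# print(tok(' Hello world')['input_ids'])  # byte-level BPE ids, EOS appended by build_inputs_with_special_tokens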
| 6 | 1 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
lowerCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( ) -> Optional[Any]:
# Get the sagemaker specific mp parameters from smp_options variable.
UpperCamelCase__ : List[str] = os.getenv('SM_HP_MP_PARAMETERS' , '{}')
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
UpperCamelCase__ : str = json.loads(lowerCamelCase_)
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
UpperCamelCase__ : Optional[Any] = os.getenv('SM_FRAMEWORK_PARAMS' , '{}')
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
UpperCamelCase__ : Optional[int] = json.loads(lowerCamelCase_)
if not mpi_options.get('sagemaker_mpi_enabled' , lowerCamelCase_):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec('smdistributed') is not None
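# Added, self-contained illustration (the env-variable names above are the real
# SageMaker ones; the sample payload below is made up): detection boils down to
# parsing the JSON blob and checking for the required key.
_sample_smp_options = json.loads('{"partitions": 2, "microbatches": 4}')
assert "partitions" in _sample_smp_options  # required field for model parallelism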
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = field(
default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
def __UpperCamelCase ( self : str):
super().__post_init__()
warnings.warn(
'`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
'`TrainingArguments` instead.' , UpperCAmelCase_ , )
@cached_property
def __UpperCamelCase ( self : Dict):
logger.info('PyTorch: setting up devices')
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'torch.distributed process group is initialized, but local_rank == -1. '
'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch')
if self.no_cuda:
UpperCamelCase__ : Tuple = torch.device('cpu')
UpperCamelCase__ : Tuple = 0
elif is_sagemaker_model_parallel_available():
UpperCamelCase__ : str = smp.local_rank()
UpperCamelCase__ : List[Any] = torch.device('cuda' , UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend='smddp' , timeout=self.ddp_timeout_delta)
UpperCamelCase__ : Dict = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK'))
UpperCamelCase__ : List[Any] = torch.device('cuda' , self.local_rank)
UpperCamelCase__ : int = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
UpperCamelCase__ : str = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
UpperCamelCase__ : Optional[int] = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='nccl' , timeout=self.ddp_timeout_delta)
UpperCamelCase__ : Dict = torch.device('cuda' , self.local_rank)
UpperCamelCase__ : List[Any] = 1
if device.type == "cuda":
torch.cuda.set_device(UpperCAmelCase_)
return device
@property
def __UpperCamelCase ( self : Dict):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def __UpperCamelCase ( self : Optional[int]):
return not is_sagemaker_model_parallel_available()
@property
def __UpperCamelCase ( self : Optional[Any]):
return False
| 6 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def __UpperCAmelCase ( lowerCamelCase_ = "AAPL") -> str:
UpperCamelCase__ : str = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
UpperCamelCase__ : Optional[Any] = BeautifulSoup(requests.get(lowerCamelCase_).text , 'html.parser')
UpperCamelCase__ : Union[str, Any] = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 6 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.model'}
lowerCAmelCase__ = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
lowerCAmelCase__ = {
'google/rembert': 256,
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : List[Any]="[CLS]" , UpperCAmelCase_ : Tuple="[SEP]" , UpperCAmelCase_ : Tuple="[UNK]" , UpperCAmelCase_ : Optional[int]="[SEP]" , UpperCAmelCase_ : Any="[PAD]" , UpperCAmelCase_ : Tuple="[CLS]" , UpperCAmelCase_ : List[str]="[MASK]" , **UpperCAmelCase_ : Union[str, Any] , ):
super().__init__(
do_lower_case=UpperCAmelCase_ , remove_space=UpperCAmelCase_ , keep_accents=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , )
UpperCamelCase__ : Tuple = do_lower_case
UpperCamelCase__ : List[Any] = remove_space
UpperCamelCase__ : Any = keep_accents
UpperCamelCase__ : Tuple = vocab_file
UpperCamelCase__ : Tuple = spm.SentencePieceProcessor()
self.sp_model.Load(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : List[str]):
return len(self.sp_model)
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : Optional[int] = {self.convert_ids_to_tokens(UpperCAmelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.__dict__.copy()
UpperCamelCase__ : Union[str, Any] = None
return state
def __setstate__( self : List[str] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : List[Any] = d
UpperCamelCase__ : Tuple = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=False):
UpperCamelCase__ : Dict = self.sp_model.EncodeAsPieces(UpperCAmelCase_)
return pieces
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Tuple):
return self.sp_model.PieceToId(UpperCAmelCase_)
def __UpperCamelCase ( self : int , UpperCAmelCase_ : List[str]):
return self.sp_model.IdToPiece(UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any]):
UpperCamelCase__ : Tuple = self.sp_model.decode_pieces(UpperCAmelCase_)
return out_string
def __UpperCamelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : Dict = [self.sep_token_id]
UpperCamelCase__ : str = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCamelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCAmelCase_)) + [1] + ([0] * len(UpperCAmelCase_)) + [1]
return [1] + ([0] * len(UpperCAmelCase_)) + [1]
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : List[str] = [self.sep_token_id]
UpperCamelCase__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
if not os.path.isdir(UpperCAmelCase_):
logger.error('Vocabulary path ({}) should be a directory'.format(UpperCAmelCase_))
return
UpperCamelCase__ : Dict = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_):
copyfile(self.vocab_file , UpperCAmelCase_)
return (out_vocab_file,)
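# Hedged usage sketch (added; the class above is RemBertTokenizer upstream and
# the vocab path is hypothetical -- a real SentencePiece model file is needed):
# tok = RemBertTokenizer('path/to/sentencepiece.model')
# ids = tok.build_inputs_with_special_tokens([5, 6, 7])  # -> [CLS] 5 6 7 [SEP] framing, as implemented above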
| 6 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
model.to(UpperCAmelCase_)
from datasets import load_dataset
UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo')
UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB')
UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_)
UpperCamelCase__ : Tuple = outputs.logits
UpperCamelCase__ : str = torch.Size((1, 16))
self.assertEqual(logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Tuple = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
| 6 | 1 |
'''simple docstring'''
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
lowerCAmelCase__ = logging.getLogger(__name__)
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''token-classification'''
def __init__( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any]):
if type(UpperCAmelCase_) == dict:
UpperCamelCase__ : List[str] = Namespace(**UpperCAmelCase_)
UpperCamelCase__ : Any = import_module('tasks')
try:
UpperCamelCase__ : Optional[int] = getattr(UpperCAmelCase_ , hparams.task_type)
UpperCamelCase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
F'Available tasks classes are: {TokenClassificationTask.__subclasses__()}')
UpperCamelCase__ : List[str] = self.token_classification_task.get_labels(hparams.labels)
UpperCamelCase__ : Optional[Any] = CrossEntropyLoss().ignore_index
super().__init__(UpperCAmelCase_ , len(self.labels) , self.mode)
def __UpperCamelCase ( self : int , **UpperCAmelCase_ : List[str]):
return self.model(**UpperCAmelCase_)
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : Dict = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase__ : Optional[Any] = (
batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
            ) # XLM and RoBERTa don't use token_type_ids
UpperCamelCase__ : int = self(**UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Optional[Any] = self.hparams
for mode in ["train", "dev", "test"]:
UpperCamelCase__ : List[str] = self._feature_file(UpperCAmelCase_)
if os.path.exists(UpperCAmelCase_) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , UpperCAmelCase_)
UpperCamelCase__ : Any = torch.load(UpperCAmelCase_)
else:
logger.info('Creating features from dataset file at %s' , args.data_dir)
UpperCamelCase__ : Union[str, Any] = self.token_classification_task.read_examples_from_file(args.data_dir , UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self.token_classification_task.convert_examples_to_features(
UpperCAmelCase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['xlnet']) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['xlnet'] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=UpperCAmelCase_ , pad_on_left=bool(self.config.model_type in ['xlnet']) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('Saving features into cached file %s' , UpperCAmelCase_)
torch.save(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : bool = False):
UpperCamelCase__ : Optional[int] = self._feature_file(UpperCAmelCase_)
logger.info('Loading features from cached file %s' , UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = torch.load(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
UpperCamelCase__ : Tuple = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
if features[0].token_type_ids is not None:
UpperCamelCase__ : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
else:
UpperCamelCase__ : int = torch.tensor([0 for f in features] , dtype=torch.long)
# HACK(we will not use this anymore soon)
UpperCamelCase__ : str = torch.tensor([f.label_ids for f in features] , dtype=torch.long)
return DataLoader(
TensorDataset(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) , batch_size=UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple):
"""Compute validation""" ""
UpperCamelCase__ : Union[str, Any] = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase__ : List[str] = (
batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
            ) # XLM and RoBERTa don't use token_type_ids
UpperCamelCase__ : Any = self(**UpperCAmelCase_)
UpperCamelCase__, UpperCamelCase__ : Optional[Any] = outputs[:2]
UpperCamelCase__ : Optional[Any] = logits.detach().cpu().numpy()
UpperCamelCase__ : Dict = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Optional[int] = torch.stack([x['val_loss'] for x in outputs]).mean()
UpperCamelCase__ : Tuple = np.concatenate([x['pred'] for x in outputs] , axis=0)
UpperCamelCase__ : List[Any] = np.argmax(UpperCAmelCase_ , axis=2)
UpperCamelCase__ : Dict = np.concatenate([x['target'] for x in outputs] , axis=0)
UpperCamelCase__ : Union[str, Any] = dict(enumerate(self.labels))
UpperCamelCase__ : Optional[Any] = [[] for _ in range(out_label_ids.shape[0])]
UpperCamelCase__ : Optional[int] = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
UpperCamelCase__ : Any = {
'val_loss': val_loss_mean,
'accuracy_score': accuracy_score(UpperCAmelCase_ , UpperCAmelCase_),
'precision': precision_score(UpperCAmelCase_ , UpperCAmelCase_),
'recall': recall_score(UpperCAmelCase_ , UpperCAmelCase_),
            'f1': f1_score(UpperCAmelCase_ , UpperCAmelCase_),
}
UpperCamelCase__ : Any = dict(results.items())
UpperCamelCase__ : Tuple = results
return ret, preds_list, out_label_list
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any):
# when stable
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self._eval_end(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[str]):
# updating to test_epoch_end instead of deprecated test_end
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Optional[int] = self._eval_end(UpperCAmelCase_)
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCamelCase__ : Any = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]):
# Add NER specific options
BaseTransformer.add_model_specific_args(UpperCAmelCase_ , UpperCAmelCase_)
parser.add_argument(
'--task_type' , default='NER' , type=UpperCAmelCase_ , help='Task type to fine tune in training (e.g. NER, POS, etc)')
parser.add_argument(
'--max_seq_length' , default=128 , type=UpperCAmelCase_ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--labels' , default='' , type=UpperCAmelCase_ , help='Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.' , )
parser.add_argument(
'--gpus' , default=0 , type=UpperCAmelCase_ , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets')
return parser
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
lowerCAmelCase__ = NERTransformer.add_model_specific_args(parser, os.getcwd())
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = NERTransformer(args)
lowerCAmelCase__ = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
lowerCAmelCase__ = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
lowerCAmelCase__ = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
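# Added, self-contained sketch (not in the original script) of the padding-aware
# label reconstruction done in `_eval_end` above: positions whose gold id equals
# pad_token_label_id are dropped before the seqeval metrics are computed.
_demo_label_map = {0: 'O', 1: 'B-PER'}
_demo_gold = np.array([[0, 1, -100]])   # -100 standing in for pad_token_label_id
_demo_pred = np.array([[0, 1, 0]])
_demo_kept = [_demo_label_map[p] for g, p in zip(_demo_gold[0], _demo_pred[0]) if g != -100]
assert _demo_kept == ['O', 'B-PER']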
| 6 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    def __init__(self , data : bytes):
        self.data = data
# Initialize hash values
        self.hashes = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
        self.round_constants = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
@staticmethod
    def preprocessing(data : bytes):
        padding = B'\x80' + (B'\x00' * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack('>Q' , (len(data) * 8))
return data + padding + big_endian_integer
    def final_hash(self):
# Convert into blocks of 64 bytes
        self.blocks = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data) , 64)
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' , block))
# add 48 0-ed integers
words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
for index in range(0 , 64):
if index > 15:
# modify the zero-ed indexes at the end of the array
                    s0 = (
self.ror(words[index - 15] , 7)
^ self.ror(words[index - 15] , 18)
^ (words[index - 15] >> 3)
)
                    s1 = (
self.ror(words[index - 2] , 17)
^ self.ror(words[index - 2] , 19)
^ (words[index - 2] >> 10)
)
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0X100_000_000
# Compression
                s1 = self.ror(e , 6) ^ self.ror(e , 11) ^ self.ror(e , 25)
                ch = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0X100_000_000
                s0 = self.ror(a , 2) ^ self.ror(a , 13) ^ self.ror(a , 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0X100_000_000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0X100_000_000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0X100_000_000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
# Modify final values
            self.hashes = [
((element + mutated_hash_values[index]) % 0X100_000_000)
for index, element in enumerate(self.hashes)
]
        self.hash = ''.join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self , value : int , rotations : int):
return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self):
        import hashlib
        msg = bytes('Test String' , 'utf-8')
        self.assertEqual(SHA256(msg).hash , hashlib.sha256(msg).hexdigest())
def main() -> None:
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument(
        '-f' , '--file' , dest='input_file' , help='Hash contents of a file')
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input , 'utf-8')
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
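# Added usage sketch (exercises only the SHA256 class defined above; the digest
# below is the standard SHA-256 test vector for b'abc'):
# >>> SHA256(b'abc').hash
# 'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'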
| 6 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
lowerCAmelCase__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type , is_finetuned):
for attribute in key.split('.'):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = 'lm_head'
        hf_pointer = getattr(hf_pointer , attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type).shape
    else:
        hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
UpperCamelCase__ : Optional[Any] = value
elif weight_type == "weight_g":
UpperCamelCase__ : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCamelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCamelCase__ : Any = value
else:
UpperCamelCase__ : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model , hf_model , is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase__ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase__ : List[Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
UpperCamelCase__ : Any = True
if "*" in mapped_key:
UpperCamelCase__ : Any = name.split(lowerCamelCase_)[0].split('.')[-2]
UpperCamelCase__ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase_)
if "weight_g" in name:
UpperCamelCase__ : int = 'weight_g'
elif "weight_v" in name:
UpperCamelCase__ : Any = 'weight_v'
elif "bias" in name:
UpperCamelCase__ : Union[str, Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase__ : Any = 'weight'
else:
UpperCamelCase__ : Tuple = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
continue
if not is_used:
unused_weights.append(lowerCamelCase_)
logger.warning(f'Unused weights: {unused_weights}')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Dict = full_name.split('conv_layers.')[-1]
UpperCamelCase__ : List[Any] = name.split('.')
UpperCamelCase__ : Any = int(items[0])
UpperCamelCase__ : int = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
UpperCamelCase__ : Tuple = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
UpperCamelCase__ : int = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
)
UpperCamelCase__ : Optional[Any] = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
)
UpperCamelCase__ : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(lowerCamelCase_)
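# Hedged note on the branches above: in fairseq's conv feature extractor, weight
# names look like 'conv_layers.<layer_id>.<type_id>.(weight|bias)'; type_id 0 is
# the conv module itself, while type_id 2 is a norm: a per-layer LayerNorm when
# use_group_norm is False, or a GroupNorm on layer 0 only when it is True.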
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True) -> Tuple:
if config_path is not None:
UpperCamelCase__ : Optional[Any] = UniSpeechConfig.from_pretrained(lowerCamelCase_)
else:
UpperCamelCase__ : int = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCamelCase__ : Union[str, Any] = Dictionary.load_from_json(lowerCamelCase_)
            # important: change the bos & pad token ids, since the CTC blank
            # symbol is <pad> and not <s> as in fairseq
UpperCamelCase__ : List[Any] = target_dict.pad_index
UpperCamelCase__ : Dict = target_dict.bos_index
UpperCamelCase__ : Union[str, Any] = target_dict.eos_index
UpperCamelCase__ : Tuple = len(target_dict.symbols)
UpperCamelCase__ : Dict = os.path.join(lowerCamelCase_ , 'vocab.json')
if not os.path.isdir(lowerCamelCase_):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_))
return
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_)
UpperCamelCase__ : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase__ : Any = 42
UpperCamelCase__ : List[str] = 43
with open(lowerCamelCase_ , 'w' , encoding='utf-8') as vocab_handle:
json.dump(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Optional[int] = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , )
UpperCamelCase__ : Optional[Any] = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
UpperCamelCase__ : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_)
processor.save_pretrained(lowerCamelCase_)
UpperCamelCase__ : Dict = UniSpeechForCTC(lowerCamelCase_)
else:
UpperCamelCase__ : List[Any] = UniSpeechForPreTraining(lowerCamelCase_)
if is_finetuned:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
UpperCamelCase__ : int = model[0].eval()
recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
hf_unispeech.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
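# Hedged CLI sketch (the script filename and paths are illustrative):
#   python convert_unispeech.py --checkpoint_path unispeech.pt \
#       --dict_path dict.json --pytorch_dump_folder_path ./unispeech-hf
# add --not_finetuned when converting a pretraining-only checkpoint (no CTC head).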
| 6 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase__ = 300 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive')
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive')
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive')
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration')
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration')
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
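# Hedged worked example (concentrations are illustrative, in cm^-3): the formula
# above is V_bi = (k_B * T / q) * ln(N_d * N_a / n_i**2); at T = 300 K with
# N_d = N_a = 1e17 and n_i = 1e10, k_B*T/q is about 0.0259 V and ln(1e14) is
# about 32.2, giving a built-in voltage of roughly 0.83 V.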
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase (__lowerCamelCase ):
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : int=99 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[str]=512 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Union[str, Any]="last" , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : int=None , ):
UpperCamelCase__ : int = parent
UpperCamelCase__ : Any = batch_size
UpperCamelCase__ : Tuple = seq_length
UpperCamelCase__ : Tuple = is_training
UpperCamelCase__ : Union[str, Any] = use_input_lengths
UpperCamelCase__ : Any = use_token_type_ids
UpperCamelCase__ : List[Any] = use_labels
UpperCamelCase__ : Optional[Any] = gelu_activation
UpperCamelCase__ : Union[str, Any] = sinusoidal_embeddings
UpperCamelCase__ : Union[str, Any] = causal
UpperCamelCase__ : Optional[Any] = asm
UpperCamelCase__ : Union[str, Any] = n_langs
UpperCamelCase__ : str = vocab_size
UpperCamelCase__ : int = n_special
UpperCamelCase__ : Dict = hidden_size
UpperCamelCase__ : List[str] = num_hidden_layers
UpperCamelCase__ : Tuple = num_attention_heads
UpperCamelCase__ : str = hidden_dropout_prob
UpperCamelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCamelCase__ : int = max_position_embeddings
UpperCamelCase__ : Optional[Any] = type_vocab_size
UpperCamelCase__ : Optional[Any] = type_sequence_label_size
UpperCamelCase__ : Dict = initializer_range
UpperCamelCase__ : int = num_labels
UpperCamelCase__ : Optional[Any] = num_choices
UpperCamelCase__ : Dict = summary_type
UpperCamelCase__ : int = use_proj
UpperCamelCase__ : Optional[int] = scope
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase__ : Any = None
if self.use_input_lengths:
UpperCamelCase__ : Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase__ : Optional[int] = None
if self.use_token_type_ids:
UpperCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
UpperCamelCase__ : str = None
UpperCamelCase__ : List[str] = None
UpperCamelCase__ : int = None
if self.use_labels:
UpperCamelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCamelCase__ : List[str] = ids_tensor([self.batch_size] , 2).float()
UpperCamelCase__ : str = ids_tensor([self.batch_size] , self.num_choices)
UpperCamelCase__ : Tuple = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __UpperCamelCase ( self : Union[str, Any]):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , ):
UpperCamelCase__ : str = FlaubertModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_ , lengths=UpperCAmelCase_ , langs=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = model(UpperCAmelCase_ , langs=UpperCAmelCase_)
UpperCamelCase__ : Any = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , ):
UpperCamelCase__ : List[Any] = FlaubertWithLMHeadModel(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] , ):
UpperCamelCase__ : List[Any] = FlaubertForQuestionAnsweringSimple(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[Any] = model(UpperCAmelCase_)
UpperCamelCase__ : List[str] = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def __UpperCamelCase ( self : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , ):
UpperCamelCase__ : str = FlaubertForQuestionAnswering(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , p_mask=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[int] = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , )
((UpperCamelCase__), ) : Tuple = result_with_labels.to_tuple()
UpperCamelCase__ : int = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
((UpperCamelCase__), ) : int = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : int , ):
UpperCamelCase__ : str = FlaubertForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Tuple = model(UpperCAmelCase_)
UpperCamelCase__ : Any = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , ):
UpperCamelCase__ : List[Any] = self.num_labels
UpperCamelCase__ : Optional[Any] = FlaubertForTokenClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , ):
UpperCamelCase__ : Optional[Any] = self.num_choices
UpperCamelCase__ : List[Any] = FlaubertForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[str] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCamelCase__ : List[str] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCamelCase__ : Tuple = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCamelCase__ : List[str] = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : List[Any] = self.prepare_config_and_inputs()
        (
            UpperCamelCase__,
            UpperCamelCase__,
            UpperCamelCase__,
            UpperCamelCase__,
            UpperCamelCase__,
            UpperCamelCase__,
            UpperCamelCase__,
            UpperCamelCase__,
            UpperCamelCase__,
        ) = config_and_inputs
UpperCamelCase__ : Optional[Any] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCamelCase = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any]):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]=False):
UpperCamelCase__ : str = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
UpperCamelCase__ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : str = FlaubertModelTester(self)
UpperCamelCase__ : int = ConfigTester(self , config_class=UpperCAmelCase_ , emb_dim=37)
def __UpperCamelCase ( self : Optional[Any]):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : str):
UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*UpperCAmelCase_)
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*UpperCAmelCase_)
@slow
def __UpperCamelCase ( self : Union[str, Any]):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : int = FlaubertModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__, UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
UpperCamelCase__ : str = True
UpperCamelCase__ : List[Any] = model_class(config=UpperCAmelCase_)
UpperCamelCase__ : Tuple = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = torch.jit.trace(
UpperCAmelCase_ , (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu')))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , 'traced_model.pt'))
UpperCamelCase__ : List[str] = torch.jit.load(os.path.join(UpperCAmelCase_ , 'traced_model.pt') , map_location=UpperCAmelCase_)
loaded(inputs_dict['input_ids'].to(UpperCAmelCase_) , inputs_dict['attention_mask'].to(UpperCAmelCase_))
@require_torch
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : str = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased')
UpperCamelCase__ : Dict = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
with torch.no_grad():
UpperCamelCase__ : str = model(UpperCAmelCase_)[0]
UpperCamelCase__ : Optional[int] = torch.Size((1, 11, 768))
self.assertEqual(output.shape , UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1e-4))
| 6 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCAmelCase ( lowerCamelCase_) -> List[Tuple[int, ...]]:
UpperCamelCase__ : int = []
if isinstance(lowerCamelCase_ , lowerCamelCase_):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , (list, tuple)):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , torch.Tensor):
shapes.append(tree.shape)
else:
raise ValueError('Not supported')
return shapes
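# Hedged example of the intended traversal above (inputs are illustrative): for
# tree = {'a': torch.zeros(2, 3), 'b': [torch.zeros(4)]}, the recursion collects
# [(2, 3), (4,)], flattening dict values and list/tuple items in order.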
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple[int, ...]:
UpperCamelCase__ : int = []
for d in reversed(lowerCamelCase_):
idx.append(flat_idx % d)
UpperCamelCase__ : Any = flat_idx // d
return tuple(reversed(lowerCamelCase_))
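# Hedged example (dims are illustrative): this is row-major unflattening, so for
# dims = (2, 3) a flat index of 5 maps to (1, 2): 5 % 3 == 2, then 5 // 3 == 1.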
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowerCamelCase_) -> None:
UpperCamelCase__ : Tuple = True
for i in range(len(lowerCamelCase_)):
UpperCamelCase__ : List[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCamelCase__ : Optional[Any] = l[reversed_idx]
if start_edges is None:
UpperCamelCase__ : int = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase_)
if end_edges is None:
UpperCamelCase__ : List[str] = [e == (d - 1) for e, d in zip(lowerCamelCase_ , lowerCamelCase_)]
reduce_edge_list(lowerCamelCase_)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase_) == 0:
return [()]
elif len(lowerCamelCase_) == 1:
return [(slice(start[0] , end[0] + 1),)]
UpperCamelCase__ : List[Tuple[slice, ...]] = []
UpperCamelCase__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase_ , lowerCamelCase_):
if s == e:
path_list.append(slice(lowerCamelCase_ , s + 1))
else:
break
UpperCamelCase__ : Tuple[slice, ...] = tuple(lowerCamelCase_)
UpperCamelCase__ : Dict = len(lowerCamelCase_)
# start == end, and we're done
if divergence_idx == len(lowerCamelCase_):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : str = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ))
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : Optional[int] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
slices.extend(lower())
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper())
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper())
UpperCamelCase__ : Dict = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
slices.extend(lower())
return slices
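# Hedged example (shapes are illustrative): for dims = (2, 3) with start = (0, 0)
# and end = (1, 2), both endpoints lie on tensor edges, so the whole block is
# covered by the single slice tuple (slice(0, 2),) rather than per-row slices.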
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> torch.Tensor:
UpperCamelCase__ : List[Any] = t.shape[:no_batch_dims]
UpperCamelCase__ : Optional[int] = list(_flat_idx_to_idx(lowerCamelCase_ , lowerCamelCase_))
# _get_minimal_slice_set is inclusive
UpperCamelCase__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase_))
# Get an ordered list of slices to perform
UpperCamelCase__ : int = _get_minimal_slice_set(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
UpperCamelCase__ : List[Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
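# Hedged example (shapes are illustrative): for t of shape (2, 3, 5) with
# no_batch_dims=2, flat_start=1 and flat_end=4 select flattened batch rows 1..3,
# i.e. indices (0, 1), (0, 2) and (1, 0), yielding a tensor of shape (3, 5).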
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Any:
if not (len(lowerCamelCase_) > 0):
raise ValueError('Must provide at least one input')
UpperCamelCase__ : int = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase_)]
UpperCamelCase__ : int = tuple([max(lowerCamelCase_) for s in zip(*lowerCamelCase_)])
def _prep_inputs(lowerCamelCase_) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
UpperCamelCase__ : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
UpperCamelCase__ : Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:])
else:
UpperCamelCase__ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
return t
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase_)
UpperCamelCase__ : int = None
if _out is not None:
UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
UpperCamelCase__ : Dict = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCamelCase__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowerCamelCase_) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[Any] = prepped_outputs
for _ in range(lowerCamelCase_):
# Chunk the input
if not low_mem:
UpperCamelCase__ : str = _select_chunk
else:
UpperCamelCase__ : List[Any] = partial(
_chunk_slice , flat_start=lowerCamelCase_ , flat_end=min(lowerCamelCase_ , i + chunk_size) , no_batch_dims=len(lowerCamelCase_) , )
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase_ , lowerCamelCase_)
# Run the layer on the chunk
UpperCamelCase__ : List[Any] = layer(**lowerCamelCase_)
# Allocate space for the output
if out is None:
UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , lowerCamelCase_)
# Put the chunk in its pre-allocated space
if isinstance(lowerCamelCase_ , lowerCamelCase_):
def assign(lowerCamelCase_ , lowerCamelCase_) -> None:
for k, v in da.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_):
assign(lowerCamelCase_ , da[k])
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCamelCase__ : List[str] = da[k]
assign(lowerCamelCase_ , lowerCamelCase_)
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
for xa, xa in zip(lowerCamelCase_ , lowerCamelCase_):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCamelCase__ : int = xa
elif isinstance(lowerCamelCase_ , torch.Tensor):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCamelCase__ : Dict = output_chunk
else:
raise ValueError('Not supported')
i += chunk_size
UpperCamelCase__ : int = tensor_tree_map(lambda lowerCamelCase_: t.view(orig_batch_dims + t.shape[1:]) , lowerCamelCase_)
return out
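# Hedged usage sketch (names and shapes are assumptions for illustration): the
# chunking function above is semantically equivalent to layer(**inputs), but it
# runs chunk_size flattened batch elements at a time, e.g. with
#   layer = lambda x: x * 2
#   inputs = {'x': torch.randn(4, 8, 16)}
#   chunk_size = 2, no_batch_dims = 2
# the result equals 2 * inputs['x'] while only materializing two flattened batch
# rows of activations per call.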
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : int = 512 , ):
UpperCamelCase__ : str = max_chunk_size
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[tuple] = None
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int):
logging.info('Tuning chunk size...')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCamelCase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
UpperCamelCase__ : List[Any] = [c for c in candidates if c > min_chunk_size]
UpperCamelCase__ : List[Any] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(UpperCAmelCase_ : int) -> bool:
try:
with torch.no_grad():
fn(*UpperCAmelCase_ , chunk_size=UpperCAmelCase_)
return True
except RuntimeError:
return False
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : Dict = len(UpperCAmelCase_) - 1
while i > min_viable_chunk_size_index:
UpperCamelCase__ : Optional[int] = test_chunk_size(candidates[i])
if not viable:
UpperCamelCase__ : Tuple = (min_viable_chunk_size_index + i) // 2
else:
UpperCamelCase__ : Optional[int] = i
UpperCamelCase__ : Dict = (i + len(UpperCAmelCase_) - 1) // 2
return candidates[min_viable_chunk_size_index]
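    # Hedged trace of the method above (sizes are illustrative): with
    # max_chunk_size = 512 and min_chunk_size = 16, candidates become
    # [16, 32, 64, 128, 256, 516] (the last entry is padded by 4), and the loop
    # binary-searches for the largest candidate whose trial run does not raise a
    # RuntimeError (e.g. out of memory).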
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Iterable , UpperCAmelCase_ : Iterable):
UpperCamelCase__ : List[str] = True
for aa, aa in zip(UpperCAmelCase_ , UpperCAmelCase_):
assert type(UpperCAmelCase_) == type(UpperCAmelCase_)
if isinstance(UpperCAmelCase_ , (list, tuple)):
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])]
UpperCamelCase__ : str = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])]
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
else:
consistent &= aa == aa
return consistent
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int , ):
UpperCamelCase__ : List[Any] = True
UpperCamelCase__ : tuple = tree_map(lambda UpperCAmelCase_: a.shape if isinstance(UpperCAmelCase_ , torch.Tensor) else a , UpperCAmelCase_ , UpperCAmelCase_)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase_)
else:
            # Otherwise, there is no cached value to fall back on; force a re-tune
UpperCamelCase__ : Optional[int] = False
if not consistent:
UpperCamelCase__ : Tuple = self._determine_favorable_chunk_size(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 6 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase__ : List[str] = UniSpeechSatForSequenceClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_)
UpperCamelCase__ : Tuple = downstream_dict['projector.weight']
UpperCamelCase__ : Any = downstream_dict['projector.bias']
UpperCamelCase__ : Dict = downstream_dict['model.post_net.linear.weight']
UpperCamelCase__ : int = downstream_dict['model.post_net.linear.bias']
return model
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> int:
UpperCamelCase__ : Dict = UniSpeechSatForAudioFrameClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_)
UpperCamelCase__ : List[str] = downstream_dict['model.linear.weight']
UpperCamelCase__ : Optional[int] = downstream_dict['model.linear.bias']
return model
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Tuple = UniSpeechSatForXVector.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_)
UpperCamelCase__ : Tuple = downstream_dict['connector.weight']
UpperCamelCase__ : str = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel):
UpperCamelCase__ : Tuple = downstream_dict[
f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
UpperCamelCase__ : str = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']
UpperCamelCase__ : List[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
UpperCamelCase__ : Optional[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
UpperCamelCase__ : Union[str, Any] = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
UpperCamelCase__ : Union[str, Any] = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
UpperCamelCase__ : int = downstream_dict['objective.W']
return model
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase__ : List[Any] = torch.load(lowerCamelCase_ , map_location='cpu')
UpperCamelCase__ : int = checkpoint['Downstream']
UpperCamelCase__ : Union[str, Any] = UniSpeechSatConfig.from_pretrained(lowerCamelCase_)
UpperCamelCase__ : List[str] = WavaVecaFeatureExtractor.from_pretrained(
lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , do_normalize=lowerCamelCase_)
UpperCamelCase__ : Optional[Any] = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification'):
UpperCamelCase__ : List[str] = convert_classification(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
elif arch.endswith('ForAudioFrameClassification'):
UpperCamelCase__ : List[str] = convert_diarization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
elif arch.endswith('ForXVector'):
UpperCamelCase__ : Optional[int] = convert_xvector(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
else:
raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}')
if hf_config.use_weighted_layer_sum:
UpperCamelCase__ : Optional[int] = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(lowerCamelCase_)
hf_model.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
lowerCAmelCase__ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
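# Hedged CLI sketch (the script filename, model id and paths are illustrative):
#   python convert_s3prl.py --base_model_name microsoft/unispeech-sat-base \
#       --config_path config.json --checkpoint_path s3prl_ckpt.pt \
#       --model_dump_path ./converted
# the head type (classification / diarization / xvector) is selected from the
# architecture name found in the config.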
| 6 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
UpperCamelCase__ : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_slow.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_)
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_fast.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : int = self.prepare_image_inputs()
UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Any = 'lower newer'
UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[int] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'lower newer'
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_):
processor()
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Union[str, Any] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'lower newer'
UpperCamelCase__ : Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 6 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=_lowerCamelCase ):
_lowerCamelCase = ['''transformers''', '''torch''', '''note_seq''']
def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[Any]):
requires_backends(self , ['transformers', 'torch', 'note_seq'])
@classmethod
def __UpperCamelCase ( cls : Tuple , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any]):
requires_backends(cls , ['transformers', 'torch', 'note_seq'])
@classmethod
def __UpperCamelCase ( cls : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Tuple):
requires_backends(cls , ['transformers', 'torch', 'note_seq'])
| 700 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = "cpu" , lowerCamelCase_ = None) -> None:
UpperCamelCase__ : List[Any] = torch.load(lowerCamelCase_ , map_location=lowerCamelCase_)
for k, v in tqdm(state_dict.items()):
if not isinstance(lowerCamelCase_ , torch.Tensor):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
UpperCamelCase__ : int = v.half()
if save_path is None: # overwrite src_path
UpperCamelCase__ : List[Any] = src_path
torch.save(lowerCamelCase_ , lowerCamelCase_)
if __name__ == "__main__":
fire.Fire(convert)
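# Hedged CLI sketch (the script filename is an assumption): fire.Fire maps
# command-line arguments onto the function's positional/keyword parameters, so an
# invocation looks roughly like
#   python convert_fp16.py path/to/pytorch_model.bin
# which halves every tensor in the state dict and overwrites the source file
# unless a save path is supplied as the third argument.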
| 6 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase (_snake_case , _snake_case ):
@register_to_config
def __init__( self : Optional[Any] , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = None):
super().__init__()
UpperCamelCase__ : Any = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCamelCase__ : Tuple = torch.zeros(lowerCAmelCase__ , lowerCAmelCase__)
else:
UpperCamelCase__ : Any = None
UpperCamelCase__ : Optional[Any] = torch.nn.Parameter(lowerCAmelCase__)
class __lowercase (_snake_case ):
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
def __init__( self : str , UpperCAmelCase_ : VQModel , UpperCAmelCase_ : CLIPTextModel , UpperCAmelCase_ : CLIPTokenizer , UpperCAmelCase_ : TransformeraDModel , UpperCAmelCase_ : VQDiffusionScheduler , UpperCAmelCase_ : LearnedClassifierFreeSamplingEmbeddings , ):
super().__init__()
self.register_modules(
vqvae=lowerCAmelCase__ , transformer=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , learned_classifier_free_sampling_embeddings=lowerCAmelCase__ , )
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int):
UpperCamelCase__ : List[str] = len(lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else 1
# get prompt text embeddings
UpperCamelCase__ : List[Any] = self.tokenizer(
lowerCAmelCase__ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
UpperCamelCase__ : Tuple = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase__ : Any = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F' {self.tokenizer.model_max_length} tokens: {removed_text}')
UpperCamelCase__ : List[str] = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase__ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device))[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCamelCase__ : Union[str, Any] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=lowerCAmelCase__)
# duplicate text embeddings for each generation per prompt
UpperCamelCase__ : List[str] = prompt_embeds.repeat_interleave(lowerCAmelCase__ , dim=0)
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCamelCase__ : List[Any] = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCamelCase__ : Optional[Any] = negative_prompt_embeds.unsqueeze(0).repeat(lowerCAmelCase__ , 1 , 1)
else:
UpperCamelCase__ : List[Any] = [''] * batch_size
UpperCamelCase__ : List[Any] = text_input_ids.shape[-1]
UpperCamelCase__ : Tuple = self.tokenizer(
lowerCAmelCase__ , padding='max_length' , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' , )
UpperCamelCase__ : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# See comment for normalizing text embeddings
UpperCamelCase__ : Dict = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=lowerCAmelCase__)
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase__ : List[str] = negative_prompt_embeds.shape[1]
UpperCamelCase__ : str = negative_prompt_embeds.repeat(1 , lowerCAmelCase__ , 1)
UpperCamelCase__ : int = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowerCAmelCase__ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase__ : Union[str, Any] = torch.cat([negative_prompt_embeds, prompt_embeds])
return prompt_embeds
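    # Hedged shape sketch for the method above: with B = batch_size *
    # num_images_per_prompt and max sequence length L, the tensor returned under
    # classifier-free guidance is (2*B, L, D): rows 0..B-1 are unconditional and
    # rows B..2B-1 are text-conditioned, matching the chunk(2) split of the model
    # output in __call__.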
@torch.no_grad()
def __call__( self : Optional[Any] , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 100 , UpperCAmelCase_ : float = 5.0 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , ):
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
UpperCamelCase__ : Optional[Any] = 1
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__):
UpperCamelCase__ : int = len(lowerCAmelCase__)
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase__)}')
UpperCamelCase__ : List[str] = batch_size * num_images_per_prompt
UpperCamelCase__ : List[Any] = guidance_scale > 1.0
UpperCamelCase__ : Optional[Any] = self._encode_prompt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCAmelCase__ , lowerCAmelCase__) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(lowerCAmelCase__)}.')
# get the initial completely masked latents unless the user supplied it
UpperCamelCase__ : List[Any] = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCamelCase__ : Optional[int] = self.transformer.num_vector_embeds - 1
UpperCamelCase__ : List[Any] = torch.full(lowerCAmelCase__ , lowerCAmelCase__).to(self.device)
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0 to'
                    F' {self.transformer.num_vector_embeds - 1} (inclusive).')
UpperCamelCase__ : int = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowerCAmelCase__ , device=self.device)
UpperCamelCase__ : Optional[Any] = self.scheduler.timesteps.to(self.device)
UpperCamelCase__ : str = latents
for i, t in enumerate(self.progress_bar(lowerCAmelCase__)):
# expand the sample if we are doing classifier free guidance
UpperCamelCase__ : Union[str, Any] = torch.cat([sample] * 2) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCamelCase__ : Optional[Any] = self.transformer(lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , timestep=lowerCAmelCase__).sample
if do_classifier_free_guidance:
UpperCamelCase__, UpperCamelCase__ : Optional[int] = model_output.chunk(2)
UpperCamelCase__ : Any = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(lowerCAmelCase__ , dim=1 , keepdim=lowerCAmelCase__)
UpperCamelCase__ : str = self.truncate(lowerCAmelCase__ , lowerCAmelCase__)
# remove `log(0)`'s (`-inf`s)
UpperCamelCase__ : Tuple = model_output.clamp(-70)
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ : Any = self.scheduler.step(lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , generator=lowerCAmelCase__).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
UpperCamelCase__ : Tuple = self.vqvae.config.vq_embed_dim
UpperCamelCase__ : int = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCamelCase__ : Optional[Any] = self.vqvae.quantize.get_codebook_entry(lowerCAmelCase__ , shape=lowerCAmelCase__)
UpperCamelCase__ : int = self.vqvae.decode(lowerCAmelCase__ , force_not_quantize=lowerCAmelCase__).sample
UpperCamelCase__ : int = (image / 2 + 0.5).clamp(0 , 1)
UpperCamelCase__ : int = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
UpperCamelCase__ : Dict = self.numpy_to_pil(lowerCAmelCase__)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase__)
    def truncate( self , log_p_x_0 : torch.FloatTensor , truncation_rate : float):
        # Keep the smallest set of the most likely classes whose cumulative
        # probability stays below `truncation_rate`; zero out the rest in log-space.
        sorted_log_p_x_0 , indices = torch.sort(log_p_x_0 , 1 , descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :] , True)
        keep_mask = torch.cat((all_true, keep_mask) , dim=1)
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1 , indices.argsort(1))
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
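# Standalone sketch (shapes assumed; torch is imported at the top of this
# module) of the classifier-free guidance step inside __call__ above: the
# batch stacks the unconditional and conditional halves, which are
# extrapolated and then renormalized over the vocabulary dimension.
def apply_cfg(model_output: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    uncond, cond = model_output.chunk(2)
    guided = uncond + guidance_scale * (cond - uncond)
    # renormalize the log-probabilities so they remain a valid distribution
    guided -= torch.logsumexp(guided, dim=1, keepdim=True)
    return guided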
| 701 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowercase (PretrainedConfig ):
    model_type = 'segformer'
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=256 , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.' , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage' , True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowercase (OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])
    @property
    def atol_for_validation( self ):
        return 1e-4
    @property
    def default_onnx_opset( self ):
        return 12
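# Usage sketch (an assumption for illustration; the config class above
# corresponds to transformers.SegformerConfig): the per-stage lists are
# expected to line up with num_encoder_blocks.
from transformers import SegformerConfig
_cfg = SegformerConfig(depths=[2, 2, 2, 2], hidden_sizes=[32, 64, 160, 256])
assert len(_cfg.depths) == _cfg.num_encoder_blocks == 4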
| 6 | 0 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the fetched dataset into its feature matrix and target vector.
    return (data['data'], data['target'])
def xgboost(features: np.ndarray , target: np.ndarray , test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0 , random_state=42)
    xgb.fit(features , target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions) , 1)
    return predictions
def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data , target = data_handling(california)
    x_train , x_test , y_train , y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1)
    predictions = xgboost(x_train , y_train , x_test)
    # Error printing
    print(f'Mean Absolute Error : {mean_absolute_error(y_test , predictions)}')
    print(f'Mean Square Error : {mean_squared_error(y_test , predictions)}')
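# Optional variant (hyper-parameter values are illustrative assumptions, not
# tuned results): the same flow with a few explicit XGBoost settings.
def xgboost_tuned(features: np.ndarray , target: np.ndarray , test_features: np.ndarray) -> np.ndarray:
    model = XGBRegressor(n_estimators=200 , max_depth=5 , learning_rate=0.1 , random_state=42 , verbosity=0)
    model.fit(features , target)
    return model.predict(test_features).reshape(len(test_features) , 1)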
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 702 |
'''simple docstring'''
def __UpperCAmelCase ( sentence : str , ngram_size : int) -> list[str]:
    # Return every contiguous character n-gram of `sentence` of the given size.
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
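# Quick check (example values assumed): the character 3-grams of "abcd".
assert __UpperCAmelCase('abcd', 3) == ['abc', 'bcd']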
if __name__ == "__main__":
from doctest import testmod
testmod()
| 6 | 0 |
'''simple docstring'''
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1_055.05585,
    "footpound": 1.355818,
}
def __UpperCAmelCase ( from_type : str , to_type : str , value : float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f'Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'
            f'Valid values are: {", ".join(ENERGY_CONVERSION)}'
        )
        raise ValueError(msg)
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
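# Worked example (values assumed): 1 kilowatthour equals 3.6e6 joules.
assert __UpperCAmelCase('kilowatthour', 'joule', 1) == 3_600_000.0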
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector , vector)
class __lowercase :
    def __init__( self , *,
        regularization : float = np.inf , kernel : str = "linear" , gamma : float = 0.0 , ):
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma')
            if not isinstance(self.gamma , (float, int)):
                raise ValueError('gamma must be float or int')
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f'Unknown kernel: {kernel}'
            raise ValueError(msg)
    def __linear( self , vectora : ndarray , vectorb : ndarray):
        return np.dot(vectora , vectorb)
    def __rbf( self , vectora : ndarray , vectorb : ndarray):
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb)))
    def fit( self , observations : list[ndarray] , classes : ndarray):
        self.observations = observations
        self.classes = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)
        def to_minimize(candidate : ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] , observations[j])
                    )
            return 1 / 2 * s - sum(candidate)
        ly_constraint = LinearConstraint(classes , 0 , 0)
        l_bounds = Bounds(0 , self.regularization)
        l_star = minimize(
            to_minimize , np.ones(n) , bounds=l_bounds , constraints=[ly_constraint]).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j])
        self.offset = s / n
    def predict( self , observation : ndarray):
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
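# Tiny end-to-end sketch (toy data assumed; the class name is kept from this
# file): two well-separated clusters labelled -1 / +1, fitted with the
# linear kernel above.
def _demo_svc() -> None:
    xs = [np.asarray([0.0, 1.0]), np.asarray([1.0, 2.0]), np.asarray([3.0, 0.0]), np.asarray([4.0, 1.0])]
    ys = np.asarray([-1.0, -1.0, 1.0, 1.0])
    svc = __lowercase(kernel='linear' , regularization=10.0)
    svc.fit(xs , ys)
    print(svc.predict(np.asarray([4.0, 0.5])))  # expected: 1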
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 0 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: int) -> dict:
    url = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join('* [{title}]({url})'.format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 704 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
UpperCamelCase__ : Dict = DPTConfig()
if "large" in checkpoint_url:
UpperCamelCase__ : List[str] = 1_024
UpperCamelCase__ : List[str] = 4_096
UpperCamelCase__ : Optional[int] = 24
UpperCamelCase__ : List[str] = 16
UpperCamelCase__ : List[str] = [5, 11, 17, 23]
UpperCamelCase__ : str = [256, 512, 1_024, 1_024]
UpperCamelCase__ : Union[str, Any] = (1, 384, 384)
if "ade" in checkpoint_url:
UpperCamelCase__ : int = True
UpperCamelCase__ : Optional[Any] = 150
UpperCamelCase__ : int = 'huggingface/label-files'
UpperCamelCase__ : List[Any] = 'ade20k-id2label.json'
UpperCamelCase__ : List[Any] = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset')) , 'r'))
UpperCamelCase__ : int = {int(lowerCamelCase_): v for k, v in idalabel.items()}
UpperCamelCase__ : Union[str, Any] = idalabel
UpperCamelCase__ : List[str] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ : Any = [1, 150, 480, 480]
return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model' , 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model' , 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed' , 'patch_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj' , 'projection')
    if "blocks" in name:
        name = name.replace('blocks' , 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense')
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv' , 'head')
    if "scratch" in name:
        name = name.replace('scratch' , 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn' , 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn' , 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn' , 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn' , 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}')
    if "out_conv" in name:
        name = name.replace('out_conv' , 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1' , 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2' , 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1' , 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2' , 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained' , 'dpt')
    if "bn" in name:
        name = name.replace('bn' , 'batch_norm')
    if "head" in name:
        name = name.replace('head' , 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm' , 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer' , 'auxiliary_head.head')
    return name
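# Spot check (sample key assumed). At this point in the module the name
# `rename_key` refers to the key-renaming function defined directly above.
assert (
    rename_key('pretrained.model.blocks.0.attn.proj.weight')
    == 'dpt.encoder.layer.0.attention.output.dense.weight'
)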
def read_in_q_k_v(state_dict , config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
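# Illustration (tiny size assumed) of the slicing performed above: a fused
# (3*hidden, hidden) qkv projection splits into three (hidden, hidden) blocks
# for query, key and value, in that order.
_hidden = 4
_fused = torch.arange(3 * _hidden * _hidden , dtype=torch.float32).reshape(3 * _hidden , _hidden)
_query = _fused[:_hidden, :]
_key = _fused[_hidden : 2 * _hidden, :]
_value = _fused[-_hidden:, :]
assert _query.shape == _key.shape == _value.shape == (_hidden, _hidden)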
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase__, UpperCamelCase__ : Any = get_dpt_config(lowerCamelCase_)
# load original state_dict from URL
UpperCamelCase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu')
# remove certain keys
remove_ignore_keys_(lowerCamelCase_)
# rename keys
for key in state_dict.copy().keys():
UpperCamelCase__ : str = state_dict.pop(lowerCamelCase_)
UpperCamelCase__ : List[str] = val
# read in qkv matrices
read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_)
# load HuggingFace model
UpperCamelCase__ : str = DPTForSemanticSegmentation(lowerCamelCase_) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowerCamelCase_)
model.load_state_dict(lowerCamelCase_)
model.eval()
# Check outputs on an image
UpperCamelCase__ : Any = 480 if 'ade' in checkpoint_url else 384
UpperCamelCase__ : List[Any] = DPTImageProcessor(size=lowerCamelCase_)
UpperCamelCase__ : int = prepare_img()
UpperCamelCase__ : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors='pt')
# forward pass
UpperCamelCase__ : Any = model(**lowerCamelCase_).logits if 'ade' in checkpoint_url else model(**lowerCamelCase_).predicted_depth
# Assert logits
UpperCamelCase__ : Tuple = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
if "ade" in checkpoint_url:
    UpperCamelCase__ : List[str] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
assert outputs.shape == torch.Size(lowerCamelCase_)
assert (
torch.allclose(outputs[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , lowerCamelCase_)
)
Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_)
print(f'Saving model to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase_)
print(f'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase_)
if push_to_hub:
print('Pushing model to hub...')
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 6 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class __lowercase (TaskTemplate ):
    task: str = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'question': Value('string'), 'context': Value('string')})
    label_schema: ClassVar[Features] = Features(
        {
            'answers': Sequence(
                {
                    'text': Value('string'),
                    'answer_start': Value('int32'),
                })
        })
    question_column: str = 'question'
    context_column: str = 'context'
    answers_column: str = 'answers'
    @property
    def column_mapping( self ):
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 705 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Any=0.6 , UpperCAmelCase_ : Dict=None , ):
UpperCamelCase__ : Tuple = parent
UpperCamelCase__ : List[str] = batch_size
UpperCamelCase__ : Optional[Any] = image_size
UpperCamelCase__ : Optional[Any] = patch_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : Union[str, Any] = is_training
UpperCamelCase__ : int = use_labels
UpperCamelCase__ : Optional[int] = hidden_size
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : str = intermediate_size
UpperCamelCase__ : Union[str, Any] = hidden_act
UpperCamelCase__ : Optional[int] = hidden_dropout_prob
UpperCamelCase__ : Tuple = attention_probs_dropout_prob
UpperCamelCase__ : Any = type_sequence_label_size
UpperCamelCase__ : int = initializer_range
UpperCamelCase__ : Optional[int] = mask_ratio
UpperCamelCase__ : int = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ : str = (image_size // patch_size) ** 2
UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any]):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Dict = ViTMAEModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Dict = model(UpperCAmelCase_)
UpperCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = config_and_inputs
UpperCamelCase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = ViTMAEModelTester(self)
UpperCamelCase__ : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def __UpperCamelCase ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def __UpperCamelCase ( self : Tuple):
pass
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCamelCase__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__, UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(UpperCAmelCase_)
UpperCamelCase__ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Any = [*signature.parameters.keys()]
UpperCamelCase__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]):
# make masks reproducible
np.random.seed(2)
UpperCamelCase__ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
UpperCamelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
UpperCamelCase__ : Optional[Any] = torch.from_numpy(UpperCAmelCase_)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ : List[str] = pt_noise
super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__, UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
UpperCamelCase__ : Dict = outputs[0].cpu().numpy()
UpperCamelCase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = model_class.from_pretrained(UpperCAmelCase_)
model.to(UpperCAmelCase_)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
# Make sure we don't have nans
UpperCamelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCamelCase__ : Any = 0
UpperCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(UpperCAmelCase_ , 1e-5)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Optional[int]):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : Optional[int]):
pass
@slow
def __UpperCamelCase ( self : Optional[Any]):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
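# Sanity check (a sketch using the model tester's defaults above): with
# image_size=30, patch_size=2 and mask_ratio=0.6, the visible sequence length
# is ceil((1 - 0.6) * ((30 // 2) ** 2 + 1)) = 91.
assert int(math.ceil((1 - 0.6) * ((30 // 2) ** 2 + 1))) == 91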
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : int):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
def __UpperCamelCase ( self : str):
# make random mask reproducible across the PT and TF model
np.random.seed(2)
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.default_image_processor
UpperCamelCase__ : Dict = prepare_img()
UpperCamelCase__ : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ : Union[str, Any] = ViTMAEConfig()
UpperCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
UpperCamelCase__ : Any = np.random.uniform(size=(1, num_patches))
# forward pass
with torch.no_grad():
UpperCamelCase__ : Dict = model(**UpperCAmelCase_ , noise=torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_))
# verify the logits
UpperCamelCase__ : Tuple = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Any = torch.tensor(
    [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase_) , atol=1e-4))
| 6 | 0 |
'''simple docstring'''
def encrypt(input_string: str , key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative')
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = [''.join(row) for row in temp_grid]
    output_string = ''.join(grid)
    return output_string
def decrypt(input_string: str , key: int) -> str:
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative')
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append('*')
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ''  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1 , len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string , key_guess)
    return results
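# Round-trip check (sample message assumed): decrypting an encryption with
# the same key recovers the original text.
assert decrypt(encrypt('WE ARE DISCOVERED. FLEE AT ONCE', 3), 3) == 'WE ARE DISCOVERED. FLEE AT ONCE'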
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=DummyObject ):
_lowerCamelCase = ['''torch''', '''scipy''']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int):
requires_backends(self , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
requires_backends(cls , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
requires_backends(cls , ['torch', 'scipy'])
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
lowerCAmelCase__ = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 707 |
'''simple docstring'''
class RadixNode:
    def __init__( self , prefix : str = "" , is_leaf : bool = False):
        # Mapping from the first character of the prefix of the node
        self.nodes : dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match( self , word : str):
        x = 0
        for q, w in zip(self.prefix , word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self , words : list[str]):
        for word in words:
            self.insert(word)
    def insert( self , word : str):
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find( self , word : str):
        incoming_node = self.nodes.get(word[0] , None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete( self , word : str):
        incoming_node = self.nodes.get(word[0] , None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree( self , height : int = 0):
        if self.prefix != "":
            print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '')
        for value in self.nodes.values():
            value.print_tree(height + 1)
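# Worked example (words assumed, separate from the tests below): inserting a
# word that shares only part of an existing prefix splits the node, leaving
# the shared piece as an internal, non-leaf node.
def _demo_prefix_split() -> None:
    root = RadixNode()
    root.insert_many(['test', 'tester', 'team'])
    assert root.find('test') and root.find('tester') and root.find('team')
    assert not root.find('te')  # 'te' is an internal split node, not a word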
def test_trie() -> bool:
    words = 'banana bananas bandana band apple all beast'.split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find('bandanas')
    assert not root.find('apps')
    root.delete('all')
    assert not root.find('all')
    root.delete('banana')
    assert not root.find('banana')
    assert root.find('bananas')
    return True
def pytests() -> None:
    assert test_trie()
def main() -> None:
    root = RadixNode()
    words = 'banana bananas bandanas bandana band apple all beast'.split()
    root.insert_many(words)
    print('Words:' , words)
    print('Tree:')
    root.print_tree()
if __name__ == "__main__":
main()
| 6 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class __lowercase (TaskTemplate ):
    task: str = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'audio': Audio()})
    label_schema: ClassVar[Features] = Features({'transcription': Value('string')})
    audio_column: str = 'audio'
    transcription_column: str = 'transcription'
    def align_with_features( self , features):
        if self.audio_column not in features:
            raise ValueError(f'Column {self.audio_column} is not present in features.')
        if not isinstance(features[self.audio_column] , Audio):
            raise ValueError(f'Column {self.audio_column} is not an Audio type.')
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema['audio'] = features[self.audio_column]
        # the dataclass is frozen, so write the updated schema through __dict__
        task_template.__dict__['input_schema'] = input_schema
        return task_template
    @property
    def column_mapping( self ):
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 708 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase__ = False
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : int):
return 12
@property
def __UpperCamelCase ( self : Tuple):
return 12
@property
def __UpperCamelCase ( self : Dict):
return 32
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self : List[str]):
torch.manual_seed(0)
UpperCamelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Dict = 12
UpperCamelCase__ : Union[str, Any] = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_)
return model
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : List[str] = self.dummy_vqvae
UpperCamelCase__ : List[str] = self.dummy_text_encoder
UpperCamelCase__ : Optional[int] = self.dummy_tokenizer
UpperCamelCase__ : List[str] = self.dummy_transformer
UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_)
UpperCamelCase__ : int = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[int] = 'cpu'
UpperCamelCase__ : str = self.dummy_vqvae
UpperCamelCase__ : Any = self.dummy_text_encoder
UpperCamelCase__ : List[Any] = self.dummy_tokenizer
UpperCamelCase__ : Dict = self.dummy_transformer
UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
UpperCamelCase__ : str = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : str = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : int = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_)
pipeline.set_progress_bar_config(disable=UpperCAmelCase_)
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : int = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , )
UpperCamelCase__ : int = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image).max() < 2.0
| 6 | 0 |
'''simple docstring'''
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 709 |
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray , size: int , stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr: np.ndarray , size: int , stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
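# Worked example (tiny matrix assumed): 2x2 max pooling with stride 2 over a
# 4x4 matrix keeps the maximum of each non-overlapping block.
_demo_matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
assert (maxpooling(_demo_matrix , size=2 , stride=2) == np.array([[6.0, 8.0], [14.0, 16.0]])).all()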
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
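# Worked example (an illustrative sketch, not part of the original script):
# 2x2 pooling with stride 2 reduces each non-overlapping 2x2 tile to one value.
# >>> maxpooling(np.arange(1, 17).reshape(4, 4), size=2, stride=2)
# array([[ 6.,  8.],
#        [14., 16.]])
# >>> avgpooling(np.arange(1, 17).reshape(4, 4), size=2, stride=2)
# array([[ 3.,  5.],
#        [11., 13.]])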
| 6 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''instructblip_vision_model'''
def __init__( self : List[Any] , UpperCAmelCase_ : Dict=1_408 , UpperCAmelCase_ : Dict=6_144 , UpperCAmelCase_ : Any=39 , UpperCAmelCase_ : Optional[int]=16 , UpperCAmelCase_ : Any=224 , UpperCAmelCase_ : List[str]=14 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : int=1e-6 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Dict=1e-10 , UpperCAmelCase_ : List[Any]=True , **UpperCAmelCase_ : List[Any] , ):
super().__init__(**__A)
UpperCamelCase__ : Dict = hidden_size
UpperCamelCase__ : Any = intermediate_size
UpperCamelCase__ : List[str] = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : List[str] = patch_size
UpperCamelCase__ : Tuple = image_size
UpperCamelCase__ : int = initializer_range
UpperCamelCase__ : Any = attention_dropout
UpperCamelCase__ : Tuple = layer_norm_eps
UpperCamelCase__ : Optional[Any] = hidden_act
UpperCamelCase__ : Optional[int] = qkv_bias
@classmethod
def __UpperCamelCase ( cls : Dict , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : List[Any]):
cls._set_token_in_kwargs(__A)
UpperCamelCase__ : Tuple = cls.get_config_dict(__A , **__A)
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type') == "instructblip":
UpperCamelCase__ : Dict = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(__A , **__A)
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''instructblip_qformer'''
def __init__( self : int , UpperCAmelCase_ : Union[str, Any]=30_522 , UpperCAmelCase_ : Any=768 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=512 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : List[Any]=1e-12 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Union[str, Any]="absolute" , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : List[Any]=1_408 , **UpperCAmelCase_ : Optional[Any] , ):
super().__init__(pad_token_id=__A , **__A)
UpperCamelCase__ : Optional[int] = vocab_size
UpperCamelCase__ : Optional[Any] = hidden_size
UpperCamelCase__ : Optional[int] = num_hidden_layers
UpperCamelCase__ : Tuple = num_attention_heads
UpperCamelCase__ : Union[str, Any] = hidden_act
UpperCamelCase__ : Any = intermediate_size
UpperCamelCase__ : List[Any] = hidden_dropout_prob
UpperCamelCase__ : Any = attention_probs_dropout_prob
UpperCamelCase__ : Dict = max_position_embeddings
UpperCamelCase__ : List[Any] = initializer_range
UpperCamelCase__ : Optional[Any] = layer_norm_eps
UpperCamelCase__ : Tuple = position_embedding_type
UpperCamelCase__ : Optional[int] = cross_attention_frequency
UpperCamelCase__ : Dict = encoder_hidden_size
@classmethod
def __UpperCamelCase ( cls : Dict , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : Tuple):
cls._set_token_in_kwargs(__A)
UpperCamelCase__ : Optional[int] = cls.get_config_dict(__A , **__A)
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type') == "instructblip":
UpperCamelCase__ : Union[str, Any] = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(__A , **__A)
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''instructblip'''
_lowerCamelCase = True
def __init__( self : Any , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Dict=32 , **UpperCAmelCase_ : Tuple):
super().__init__(**__A)
if vision_config is None:
UpperCamelCase__ : Any = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')
if qformer_config is None:
UpperCamelCase__ : Optional[int] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')
if text_config is None:
UpperCamelCase__ : int = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
UpperCamelCase__ : Dict = InstructBlipVisionConfig(**__A)
UpperCamelCase__ : List[Any] = InstructBlipQFormerConfig(**__A)
UpperCamelCase__ : List[str] = text_config["model_type"] if "model_type" in text_config else "opt"
UpperCamelCase__ : str = CONFIG_MAPPING[text_model_type](**__A)
UpperCamelCase__ : Any = self.text_config.tie_word_embeddings
UpperCamelCase__ : Optional[int] = self.text_config.is_encoder_decoder
UpperCamelCase__ : Optional[int] = num_query_tokens
UpperCamelCase__ : Optional[int] = self.vision_config.hidden_size
UpperCamelCase__ : str = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCamelCase__ : Optional[Any] = 1.0
UpperCamelCase__ : int = 0.02
@classmethod
def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : InstructBlipVisionConfig , UpperCAmelCase_ : InstructBlipQFormerConfig , UpperCAmelCase_ : PretrainedConfig , **UpperCAmelCase_ : Tuple , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Union[str, Any] = copy.deepcopy(self.__dict__)
UpperCamelCase__ : str = self.vision_config.to_dict()
UpperCamelCase__ : List[str] = self.qformer_config.to_dict()
UpperCamelCase__ : Tuple = self.text_config.to_dict()
UpperCamelCase__ : List[str] = self.__class__.model_type
return output
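# Illustrative assembly of the composite config (a sketch; the readable class
# names come from this file's own log messages, and the classmethod name
# `from_vision_qformer_text_configs` is an assumption, since the identifiers
# above are placeholders):
# vision = InstructBlipVisionConfig()
# qformer = InstructBlipQFormerConfig()
# text = CONFIG_MAPPING['opt']()  # default text backbone per the __init__ above
# composite = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)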
| 710 |
'''simple docstring'''
from __future__ import annotations
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]):
UpperCamelCase__ : int = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.')
if len(UpperCAmelCase_) != 0:
UpperCamelCase__ : str = len(rows[0])
if cols == 0:
raise error
for row in rows:
if len(UpperCAmelCase_) != cols:
raise error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise error
UpperCamelCase__ : Optional[int] = rows
else:
UpperCamelCase__ : Optional[Any] = []
def __UpperCamelCase ( self : Union[str, Any]):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
@property
def __UpperCamelCase ( self : Dict):
return len(self.rows)
@property
def __UpperCamelCase ( self : Tuple):
return len(self.rows[0])
@property
def __UpperCamelCase ( self : List[Any]):
return (self.num_rows, self.num_columns)
@property
def __UpperCamelCase ( self : Any):
return self.order[0] == self.order[1]
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[int] = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
for row_num in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0])
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]))
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns))
def __UpperCamelCase ( self : str):
return bool(self.determinant())
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
UpperCamelCase__ : Optional[Any] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns)
if other_column != column
]
for other_row in range(self.num_rows)
if other_row != row
]
return Matrix(UpperCAmelCase_).determinant()
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
if (row + column) % 2 == 0:
return self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
return -1 * self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
return Matrix(
[
[self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) for column in range(self.num_columns)]
for row in range(self.num_rows)
])
def __UpperCamelCase ( self : Optional[int]):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns)
]
for row in range(self.minors().num_rows)
])
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = [
[self.cofactors().rows[column][row] for column in range(self.num_columns)]
for row in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse')
return self.adjugate() * (1 / determinant)
def __repr__( self : Any):
return str(self.rows)
def __str__( self : List[Any]):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0])) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(UpperCAmelCase_) for value in row]) + '.]'
for row in self.rows
])
+ "]"
)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : List[str] = TypeError('Row must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix')
if position is None:
self.rows.append(UpperCAmelCase_)
else:
UpperCamelCase__ : Tuple = self.rows[0:position] + [row] + self.rows[position:]
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : int = TypeError(
'Column must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in column:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix')
if position is None:
UpperCamelCase__ : Optional[int] = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
else:
UpperCamelCase__ : str = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows)
]
def __eq__( self : List[Any] , UpperCAmelCase_ : object):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Any , UpperCAmelCase_ : object):
return not self == other
def __neg__( self : Union[str, Any]):
return self * -1
def __add__( self : Optional[int] , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __sub__( self : Tuple , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __mul__( self : Any , UpperCAmelCase_ : Matrix | int | float):
if isinstance(UpperCAmelCase_ , (int, float)):
return Matrix(
[[int(element * other) for element in row] for row in self.rows])
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second')
return Matrix(
[
[Matrix.dot_product(UpperCAmelCase_ , UpperCAmelCase_) for column in other.columns()]
for row in self.rows
])
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix')
def __pow__( self : Dict , UpperCAmelCase_ : int):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise TypeError('A Matrix can only be raised to the power of an int')
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power')
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power')
UpperCamelCase__ : str = self
for _ in range(other - 1):
result *= self
return result
@classmethod
def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int]):
return sum(row[i] * column[i] for i in range(len(UpperCAmelCase_)))
if __name__ == "__main__":
import doctest
doctest.testmod()
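# Worked example of the 2x2 determinant branch above (a sketch):
# det([[1, 2], [3, 4]]) = (1 * 4) - (2 * 3) = -2, which is non-zero, so the
# class would report the matrix as invertible; a zero determinant instead
# makes the inverse computation raise a TypeError.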
| 6 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[Any]:
for attribute in key.split('.'):
UpperCamelCase__ : int = getattr(__lowerCAmelCase , __lowerCAmelCase)
if weight_type is not None:
UpperCamelCase__ : int = getattr(__lowerCAmelCase , __lowerCAmelCase).shape
else:
UpperCamelCase__ : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
UpperCamelCase__ : Dict = value
elif weight_type == "weight_g":
UpperCamelCase__ : Dict = value
elif weight_type == "weight_v":
UpperCamelCase__ : Optional[Any] = value
elif weight_type == "bias":
UpperCamelCase__ : int = value
else:
UpperCamelCase__ : str = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase__ : Optional[int] = []
UpperCamelCase__ : Dict = fairseq_model.state_dict()
UpperCamelCase__ : str = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ : List[str] = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase__ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase__ : Dict = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
UpperCamelCase__ : Optional[Any] = True
if "*" in mapped_key:
UpperCamelCase__ : List[Any] = name.split(__lowerCAmelCase)[0].split('.')[-2]
UpperCamelCase__ : List[str] = mapped_key.replace('*' , __lowerCAmelCase)
if "weight_g" in name:
UpperCamelCase__ : Union[str, Any] = """weight_g"""
elif "weight_v" in name:
UpperCamelCase__ : Union[str, Any] = """weight_v"""
elif "weight" in name:
UpperCamelCase__ : int = """weight"""
elif "bias" in name:
UpperCamelCase__ : List[Any] = """bias"""
else:
UpperCamelCase__ : Union[str, Any] = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
continue
if not is_used:
unused_weights.append(__lowerCAmelCase)
logger.warning(f'Unused weights: {unused_weights}')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase__ : str = full_name.split('conv_layers.')[-1]
UpperCamelCase__ : Union[str, Any] = name.split('.')
UpperCamelCase__ : Optional[int] = int(items[0])
UpperCamelCase__ : Any = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
UpperCamelCase__ : Union[str, Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
UpperCamelCase__ : int = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
UpperCamelCase__ : Optional[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
UpperCamelCase__ : Any = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(__lowerCAmelCase)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> List[str]:
UpperCamelCase__ : Union[str, Any] = SEWConfig()
if is_finetuned:
UpperCamelCase__ : Tuple = model.wav_encoder.wav_model.cfg
else:
UpperCamelCase__ : Dict = model.cfg
UpperCamelCase__ : Any = fs_config.conv_bias
UpperCamelCase__ : Optional[Any] = eval(fs_config.conv_feature_layers)
UpperCamelCase__ : Optional[Any] = [x[0] for x in conv_layers]
UpperCamelCase__ : List[Any] = [x[1] for x in conv_layers]
UpperCamelCase__ : Dict = [x[2] for x in conv_layers]
UpperCamelCase__ : List[str] = """gelu"""
UpperCamelCase__ : str = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
UpperCamelCase__ : Any = 0.0
UpperCamelCase__ : Tuple = fs_config.activation_fn.name
UpperCamelCase__ : Tuple = fs_config.encoder_embed_dim
UpperCamelCase__ : Optional[Any] = 0.02
UpperCamelCase__ : str = fs_config.encoder_ffn_embed_dim
UpperCamelCase__ : str = 1e-5
UpperCamelCase__ : str = fs_config.encoder_layerdrop
UpperCamelCase__ : Optional[int] = fs_config.encoder_attention_heads
UpperCamelCase__ : int = fs_config.conv_pos_groups
UpperCamelCase__ : Optional[int] = fs_config.conv_pos
UpperCamelCase__ : Union[str, Any] = len(__lowerCAmelCase)
UpperCamelCase__ : Optional[int] = fs_config.encoder_layers
UpperCamelCase__ : Any = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
UpperCamelCase__ : Optional[Any] = model.cfg
UpperCamelCase__ : str = fs_config.final_dropout
UpperCamelCase__ : Union[str, Any] = fs_config.layerdrop
UpperCamelCase__ : List[str] = fs_config.activation_dropout
UpperCamelCase__ : Any = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
UpperCamelCase__ : str = fs_config.attention_dropout
UpperCamelCase__ : Optional[Any] = fs_config.dropout_input
UpperCamelCase__ : Union[str, Any] = fs_config.dropout
UpperCamelCase__ : Dict = fs_config.mask_channel_length
UpperCamelCase__ : int = fs_config.mask_channel_prob
UpperCamelCase__ : Tuple = fs_config.mask_length
UpperCamelCase__ : Tuple = fs_config.mask_prob
UpperCamelCase__ : Optional[int] = """Wav2Vec2FeatureExtractor"""
UpperCamelCase__ : int = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True) -> str:
if is_finetuned:
UpperCamelCase__ : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
else:
UpperCamelCase__ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
if config_path is not None:
UpperCamelCase__ : str = SEWConfig.from_pretrained(__lowerCAmelCase)
else:
UpperCamelCase__ : str = convert_config(model[0] , __lowerCAmelCase)
UpperCamelCase__ : Optional[int] = model[0].eval()
UpperCamelCase__ : Dict = True if config.feat_extract_norm == """layer""" else False
UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
if is_finetuned:
if dict_path:
UpperCamelCase__ : str = Dictionary.load(__lowerCAmelCase)
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase__ : Tuple = target_dict.pad_index
UpperCamelCase__ : List[str] = target_dict.bos_index
UpperCamelCase__ : Any = target_dict.pad_index
UpperCamelCase__ : Any = target_dict.bos_index
UpperCamelCase__ : int = target_dict.eos_index
UpperCamelCase__ : List[str] = len(target_dict.symbols)
UpperCamelCase__ : Optional[int] = os.path.join(__lowerCAmelCase , 'vocab.json')
if not os.path.isdir(__lowerCAmelCase):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__lowerCAmelCase))
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase)
with open(__lowerCAmelCase , 'w' , encoding='utf-8') as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase)
UpperCamelCase__ : Dict = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__lowerCAmelCase , )
UpperCamelCase__ : int = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase)
processor.save_pretrained(__lowerCAmelCase)
UpperCamelCase__ : Optional[Any] = SEWForCTC(__lowerCAmelCase)
else:
UpperCamelCase__ : Dict = SEWModel(__lowerCAmelCase)
feature_extractor.save_pretrained(__lowerCAmelCase)
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
hf_model.save_pretrained(__lowerCAmelCase)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase__ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
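# Example invocation (a sketch; the script name and all paths are placeholders):
# python convert_sew_checkpoint.py \
#     --checkpoint_path ./sew_tiny_100k.pt \
#     --dict_path ./dict.ltr.txt \
#     --pytorch_dump_folder_path ./sew-hf \
#     --is_finetuned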
| 711 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __lowercase :
def __UpperCamelCase ( self : Union[str, Any]):
torch.manual_seed(0)
UpperCamelCase__ : Dict = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : List[str] = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Dict):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : str = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[str] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Dict = self.get_dummy_components()
UpperCamelCase__ : List[Any] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = inputs['prompt']
UpperCamelCase__ : List[Any] = inputs['generator']
UpperCamelCase__ : Tuple = inputs['num_inference_steps']
UpperCamelCase__ : List[Any] = inputs['output_type']
if "image" in inputs:
UpperCamelCase__ : Tuple = inputs['image']
else:
UpperCamelCase__ : Union[str, Any] = None
if "mask_image" in inputs:
UpperCamelCase__ : Optional[int] = inputs['mask_image']
else:
UpperCamelCase__ : int = None
if "original_image" in inputs:
UpperCamelCase__ : List[Any] = inputs['original_image']
else:
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__, UpperCamelCase__ : Any = pipe.encode_prompt(UpperCAmelCase_)
# inputs with prompt converted to embeddings
UpperCamelCase__ : List[Any] = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
UpperCamelCase__ : Dict = image
if mask_image is not None:
UpperCamelCase__ : Optional[int] = mask_image
if original_image is not None:
UpperCamelCase__ : Union[str, Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : int = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase_ , UpperCAmelCase_) is None , F'`{optional_component}` did not stay set to None after loading.' , )
UpperCamelCase__ : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = inputs['generator']
UpperCamelCase__ : List[Any] = inputs['num_inference_steps']
UpperCamelCase__ : Optional[int] = inputs['output_type']
# inputs with prompt converted to embeddings
UpperCamelCase__ : Any = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
UpperCamelCase__ : Tuple = image
if mask_image is not None:
UpperCamelCase__ : Union[str, Any] = mask_image
if original_image is not None:
UpperCamelCase__ : str = original_image
UpperCamelCase__ : Union[str, Any] = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Dict = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Any = self.get_dummy_components()
UpperCamelCase__ : List[str] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Any = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
UpperCamelCase__ : Any = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Tuple = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Optional[int] = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
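# Both tests above share one reproducibility pattern (sketch):
#     first = pipe(**inputs)[0]
#     pipe.save_pretrained(tmpdir)
#     reloaded = self.pipeline_class.from_pretrained(tmpdir)
#     second = reloaded(**inputs)[0]
#     assert np.abs(to_np(first) - to_np(second)).max() < 1e-4
# i.e. a save/load round trip with identically seeded inputs must not change
# the output by more than 1e-4.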
| 6 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __lowercase (_snake_case ):
_lowerCamelCase = 42
class __lowercase (_snake_case , _snake_case ):
@register_to_config
def __init__( self : Optional[int] , UpperCAmelCase_ : int = 16 , UpperCAmelCase_ : int = 88 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : int = 32 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : str = "geglu" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , ):
super().__init__()
UpperCamelCase__ : int = num_attention_heads
UpperCamelCase__ : Union[str, Any] = attention_head_dim
UpperCamelCase__ : Any = num_attention_heads * attention_head_dim
UpperCamelCase__ : Tuple = in_channels
UpperCamelCase__ : Dict = torch.nn.GroupNorm(num_groups=UpperCAmelCase_ , num_channels=UpperCAmelCase_ , eps=1e-6 , affine=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_)
# 3. Define transformers blocks
UpperCamelCase__ : Tuple = nn.ModuleList(
[
BasicTransformerBlock(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , dropout=UpperCAmelCase_ , cross_attention_dim=UpperCAmelCase_ , activation_fn=UpperCAmelCase_ , attention_bias=UpperCAmelCase_ , double_self_attention=UpperCAmelCase_ , norm_elementwise_affine=UpperCAmelCase_ , )
for d in range(UpperCAmelCase_)
])
UpperCamelCase__ : List[str] = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : int=None , UpperCAmelCase_ : bool = True , ):
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[Any] = hidden_states.shape
UpperCamelCase__ : List[Any] = batch_frames // num_frames
UpperCamelCase__ : Any = hidden_states
UpperCamelCase__ : Dict = hidden_states[None, :].reshape(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : str = hidden_states.permute(0 , 2 , 1 , 3 , 4)
UpperCamelCase__ : List[Any] = self.norm(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = hidden_states.permute(0 , 3 , 4 , 2 , 1).reshape(batch_size * height * width , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.proj_in(UpperCAmelCase_)
# 2. Blocks
for block in self.transformer_blocks:
UpperCamelCase__ : Optional[Any] = block(
UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , timestep=UpperCAmelCase_ , cross_attention_kwargs=UpperCAmelCase_ , class_labels=UpperCAmelCase_ , )
# 3. Output
UpperCamelCase__ : Optional[Any] = self.proj_out(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = (
hidden_states[None, None, :]
.reshape(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
.permute(0 , 3 , 4 , 1 , 2)
.contiguous()
)
UpperCamelCase__ : Tuple = hidden_states.reshape(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=UpperCAmelCase_)
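# Shape walkthrough for the forward pass above (illustrative): an input of
# shape (batch_size * num_frames, channels, height, width) is regrouped to
# (batch_size * height * width, num_frames, inner_dim), so each transformer
# block attends across the num_frames axis only; the result is projected back
# to `channels`, reshaped to the original layout, and added to the residual.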
| 712 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
def primitive_root(p_val: int) -> int:
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print('\nWARNING:')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.')
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt', 'w') as fo:
        fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt', 'w') as fo:
        fo.write(f'{private_key[0]},{private_key[1]}')
def main() -> None:
print('Making key files...')
make_key_files('elgamal' , 2_048)
print('Key files generation successful')
if __name__ == "__main__":
main()
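# Key relationship (sketch of the math in generate_key above): with prime p,
# primitive root e_1, and private exponent d drawn from range(3, p), the
# published tuple is (key_size, e_1, e_2, p) where e_2 is the modular inverse
# of pow(e_1, d, p) -- note this variant stores the inverse of the power
# rather than pow(e_1, d, p) itself.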
| 6 | 0 |
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowerCAmelCase__ = datasets.logging.get_logger(__name__)
lowerCAmelCase__ = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n'
lowerCAmelCase__ = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
lowerCAmelCase__ = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="dummy_doc") -> Any:
UpperCamelCase__ : Any = {doc: key_lines}
UpperCamelCase__ : Any = {doc: sys_lines}
UpperCamelCase__ : int = {}
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : str = 0
UpperCamelCase__ : Optional[Any] = 0
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : Tuple = 0
UpperCamelCase__, UpperCamelCase__ : List[Any] = reader.get_doc_mentions(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__)
key_singletons_num += singletons_num
if NP_only or min_span:
UpperCamelCase__ : Tuple = reader.set_annotated_parse_trees(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ , lowerCAmelCase__)
UpperCamelCase__, UpperCamelCase__ : Tuple = reader.get_doc_mentions(lowerCAmelCase__ , sys_doc_lines[doc] , lowerCAmelCase__)
sys_singletons_num += singletons_num
if NP_only or min_span:
UpperCamelCase__ : Dict = reader.set_annotated_parse_trees(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ , lowerCAmelCase__)
if remove_nested:
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = reader.remove_nested_coref_mentions(lowerCAmelCase__ , lowerCAmelCase__)
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
UpperCamelCase__, UpperCamelCase__ : List[Any] = reader.remove_nested_coref_mentions(lowerCAmelCase__ , lowerCAmelCase__)
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
UpperCamelCase__ : Optional[Any] = reader.get_mention_assignments(lowerCAmelCase__ , lowerCAmelCase__)
UpperCamelCase__ : str = reader.get_mention_assignments(lowerCAmelCase__ , lowerCAmelCase__)
UpperCamelCase__ : Optional[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}')
logger.info(
'Number of resulting singleton clusters in the key '
f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}')
if not keep_singletons:
logger.info(
f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
'files, respectively')
return doc_coref_infos
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Any:
UpperCamelCase__ : Any = get_coref_infos(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
UpperCamelCase__ : List[Any] = {}
UpperCamelCase__ : Union[str, Any] = 0
UpperCamelCase__ : Any = 0
for name, metric in metrics:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Dict = evaluator.evaluate_documents(lowerCAmelCase__ , lowerCAmelCase__ , beta=1)
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': fa})
logger.info(
name.ljust(10) , f'Recall: {recall * 100:.2f}' , f' Precision: {precision * 100:.2f}' , f' F1: {fa * 100:.2f}' , )
if conll_subparts_num == 3:
UpperCamelCase__ : Optional[int] = (conll / 3) * 100
logger.info(f'CoNLL score: {conll:.2f}')
output_scores.update({'conll_score': conll})
return output_scores
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
UpperCamelCase__ : int = False
for line in key_lines:
if not line.startswith('#'):
if len(line.split()) > 6:
UpperCamelCase__ : Tuple = line.split()[5]
if not parse_col == "-":
UpperCamelCase__ : Optional[Any] = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase (datasets.Metric ):
def __UpperCamelCase ( self : List[str]):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string')),
'references': datasets.Sequence(datasets.Value('string')),
}) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Dict=False):
UpperCamelCase__ : Tuple = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
if min_span:
UpperCamelCase__ : Union[str, Any] = util.check_gold_parse_annotation(_UpperCAmelCase)
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.')
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
UpperCamelCase__ : List[str] = evaluate(
key_lines=_UpperCAmelCase , sys_lines=_UpperCAmelCase , metrics=_UpperCAmelCase , NP_only=_UpperCAmelCase , remove_nested=_UpperCAmelCase , keep_singletons=_UpperCAmelCase , min_span=_UpperCAmelCase , )
return score
| 713 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
lowerCAmelCase__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str:
for attribute in key.split('.'):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCamelCase__ : str = 'lm_head'
UpperCamelCase__ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_)
if weight_type is not None:
UpperCamelCase__ : List[Any] = getattr(lowerCamelCase_ , lowerCamelCase_).shape
else:
UpperCamelCase__ : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
UpperCamelCase__ : Optional[Any] = value
elif weight_type == "weight_g":
UpperCamelCase__ : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCamelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCamelCase__ : Any = value
else:
UpperCamelCase__ : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : int = fairseq_model.state_dict()
UpperCamelCase__ : int = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase__ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase__ : List[Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
UpperCamelCase__ : Any = True
if "*" in mapped_key:
UpperCamelCase__ : Any = name.split(lowerCamelCase_)[0].split('.')[-2]
UpperCamelCase__ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase_)
if "weight_g" in name:
UpperCamelCase__ : int = 'weight_g'
elif "weight_v" in name:
UpperCamelCase__ : Any = 'weight_v'
elif "bias" in name:
UpperCamelCase__ : Union[str, Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase__ : Any = 'weight'
else:
UpperCamelCase__ : Tuple = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
continue
if not is_used:
unused_weights.append(lowerCamelCase_)
logger.warning(f'Unused weights: {unused_weights}')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Dict = full_name.split('conv_layers.')[-1]
UpperCamelCase__ : List[Any] = name.split('.')
UpperCamelCase__ : Any = int(items[0])
UpperCamelCase__ : int = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
UpperCamelCase__ : Tuple = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
UpperCamelCase__ : int = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'
" found."
)
UpperCamelCase__ : Optional[Any] = value
logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
)
UpperCamelCase__ : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(lowerCamelCase_)
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True) -> Tuple:
if config_path is not None:
UpperCamelCase__ : Optional[Any] = UniSpeechConfig.from_pretrained(lowerCamelCase_)
else:
UpperCamelCase__ : int = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCamelCase__ : Union[str, Any] = Dictionary.load_from_json(lowerCamelCase_)
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase__ : List[Any] = target_dict.pad_index
UpperCamelCase__ : Dict = target_dict.bos_index
UpperCamelCase__ : Union[str, Any] = target_dict.eos_index
UpperCamelCase__ : Tuple = len(target_dict.symbols)
UpperCamelCase__ : Dict = os.path.join(lowerCamelCase_ , 'vocab.json')
if not os.path.isdir(lowerCamelCase_):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_))
return
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_)
UpperCamelCase__ : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase__ : Any = 42
UpperCamelCase__ : List[str] = 43
with open(lowerCamelCase_ , 'w' , encoding='utf-8') as vocab_handle:
json.dump(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Optional[int] = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , )
UpperCamelCase__ : Optional[Any] = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
UpperCamelCase__ : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_)
processor.save_pretrained(lowerCamelCase_)
UpperCamelCase__ : Dict = UniSpeechForCTC(lowerCamelCase_)
else:
UpperCamelCase__ : List[Any] = UniSpeechForPreTraining(lowerCamelCase_)
if is_finetuned:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
UpperCamelCase__ : int = model[0].eval()
recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
hf_unispeech.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
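# Example invocation (a sketch; the script name and all paths are hypothetical
# placeholders):
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path ./unispeech.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-hf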
| 6 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCAmelCase__ = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None) -> Union[str, Any]:
UpperCamelCase__ : Tuple = XLNetConfig.from_json_file(__A)
UpperCamelCase__ : List[Any] = finetuning_task.lower() if finetuning_task is not None else ''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}')
UpperCamelCase__ : int = finetuning_task
UpperCamelCase__ : Union[str, Any] = GLUE_TASKS_NUM_LABELS[finetuning_task]
UpperCamelCase__ : int = XLNetForSequenceClassification(__A)
elif "squad" in finetuning_task:
UpperCamelCase__ : Dict = finetuning_task
UpperCamelCase__ : Optional[Any] = XLNetForQuestionAnswering(__A)
else:
UpperCamelCase__ : Any = XLNetLMHeadModel(__A)
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__A , __A , __A)
# Save pytorch-model
UpperCamelCase__ : Optional[Any] = os.path.join(__A , __A)
UpperCamelCase__ : Any = os.path.join(__A , __A)
print(f'Save PyTorch model to {os.path.abspath(__A)}')
torch.save(model.state_dict() , __A)
print(f'Save configuration file to {os.path.abspath(__A)}')
with open(__A , 'w' , encoding='utf-8') as f:
f.write(config.to_json_string())
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
lowerCAmelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
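# Example invocation (a sketch; the script name and all paths are hypothetical
# placeholders):
#
#   python convert_xlnet_checkpoint.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pt \
#       --finetuning_task sts-b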
| 714 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[str]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Union[str, Any] = 1
UpperCamelCase__ : Union[str, Any] = 3
UpperCamelCase__ : Dict = (32, 32)
UpperCamelCase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCAmelCase_)
return image
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCamelCase ( self : str):
torch.manual_seed(0)
UpperCamelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[Any]):
def extract(*UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict):
class __lowercase :
def __init__( self : List[Any]):
UpperCamelCase__ : Optional[Any] = torch.ones([0])
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int):
self.pixel_values.to(UpperCAmelCase_)
return self
return Out()
return extract
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Any = self.dummy_cond_unet
UpperCamelCase__ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
UpperCamelCase__ : List[str] = self.dummy_vae
UpperCamelCase__ : str = self.dummy_text_encoder
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Tuple = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : int = self.dummy_cond_unet
UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.dummy_vae
UpperCamelCase__ : Optional[int] = self.dummy_text_encoder
UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Dict = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert isinstance(pipe.scheduler , UpperCAmelCase_)
assert pipe.safety_checker is None
UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Dict = self.dummy_cond_unet
UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Any = self.dummy_vae
UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder
UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# put models in fp16
UpperCamelCase__ : Any = unet.half()
UpperCamelCase__ : Tuple = vae.half()
UpperCamelCase__ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
UpperCamelCase__ : Any = 4_003_660_346
UpperCamelCase__ : Any = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : str = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
# with safety guidance (strong configuration)
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : str = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity'
UpperCamelCase__ : Tuple = 2_734_971_755
UpperCamelCase__ : Tuple = 7
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : int = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : int = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : int = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
UpperCamelCase__ : Any = 1_044_355_234
UpperCamelCase__ : Optional[int] = 12
UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : List[str] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
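# A minimal self-contained sketch of the slice-comparison pattern the tests above
# repeat: take the bottom-right 3x3 corner of the last channel and compare it to a
# reference slice within an absolute tolerance. `image` is assumed to be the
# (batch, height, width, channels) array produced with output_type='np'.
import numpy as np

def matches_expected_slice(image: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-2) -> bool:
    image_slice = image[0, -3:, -3:, -1]
    return float(np.abs(image_slice.flatten() - expected_slice).max()) < atol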
| 6 | 0 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ = get_tests_dir('fixtures')
lowerCAmelCase__ = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCAmelCase__ = get_tests_dir('fixtures/dummy-config.json')
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Any = 0
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Tuple = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
self.assertIsInstance(_A , _A)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : str = AutoFeatureExtractor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
def __UpperCamelCase ( self : List[str]):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ : int = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCamelCase__ : Any = AutoFeatureExtractor.from_pretrained(_A).to_dict()
config_dict.pop('feature_extractor_type')
UpperCamelCase__ : str = WavaVecaFeatureExtractor(**_A)
# save in new folder
model_config.save_pretrained(_A)
config.save_pretrained(_A)
UpperCamelCase__ : List[str] = AutoFeatureExtractor.from_pretrained(_A)
# make sure private variable is not incorrectly saved
UpperCamelCase__ : Dict = json.loads(config.to_json_string())
self.assertTrue('_processor_class' not in dict_as_saved)
self.assertIsInstance(_A , _A)
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
def __UpperCamelCase ( self : Union[str, Any]):
with self.assertRaisesRegex(
_A , 'bert-base is not a local folder and is not a valid model identifier'):
UpperCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained('bert-base')
def __UpperCamelCase ( self : Any):
with self.assertRaisesRegex(
_A , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
UpperCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_A , revision='aaaaaa')
def __UpperCamelCase ( self : Any):
with self.assertRaisesRegex(
_A , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCamelCase__ : str = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model')
def __UpperCamelCase ( self : Any):
with self.assertRaises(_A):
UpperCamelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A):
UpperCamelCase__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A)
UpperCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A)
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_A)
UpperCamelCase__ : str = AutoFeatureExtractor.from_pretrained(_A , trust_remote_code=_A)
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
def __UpperCamelCase ( self : str):
try:
AutoConfig.register('custom' , _A)
AutoFeatureExtractor.register(_A , _A)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A):
AutoFeatureExtractor.register(_A , _A)
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase__ : Tuple = CustomFeatureExtractor.from_pretrained(_A)
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_A)
UpperCamelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __UpperCamelCase ( self : Union[str, Any]):
class __lowercase (a__ ):
_lowerCamelCase = True
try:
AutoConfig.register('custom' , _A)
AutoFeatureExtractor.register(_A , _A)
# If remote code is not set, the default is to use local
UpperCamelCase__ : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor')
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
self.assertTrue(feature_extractor.is_local)
# If remote code is disabled, we load the local one.
UpperCamelCase__ : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A)
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
self.assertTrue(feature_extractor.is_local)
# If remote is enabled, we load from the Hub
UpperCamelCase__ : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A)
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
self.assertTrue(not hasattr(_A , 'is_local'))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
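# A minimal sketch of the register-then-clean-up pattern the two tests above rely
# on (`CustomConfig` / `CustomFeatureExtractor` come from the test fixtures):
#
#   try:
#       AutoConfig.register('custom', CustomConfig)
#       AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#       ...  # exercise the auto classes
#   finally:
#       CONFIG_MAPPING._extra_content.pop('custom', None)
#       FEATURE_EXTRACTOR_MAPPING._extra_content.pop(CustomConfig, None)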
| 715 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowerCAmelCase__ = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
lowerCAmelCase__ = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __UpperCAmelCase ( ) -> Union[str, Any]:
UpperCamelCase__ : Optional[Any] = (
list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1))
)
UpperCamelCase__ : List[Any] = bs[:]
UpperCamelCase__ : Optional[int] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase_)
cs.append(2**8 + n)
n += 1
UpperCamelCase__ : Union[str, Any] = [chr(lowerCamelCase_) for n in cs]
return dict(zip(lowerCamelCase_ , lowerCamelCase_))
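# Worked example (a sketch): the mapping keeps the printable byte ranges as-is and
# shifts every other byte above 255, so chr(33) ('!') maps to itself while the
# space byte 32 maps to chr(256 + 32) == 'Ġ', the familiar GPT-2 space marker.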
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Any = set()
UpperCamelCase__ : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
UpperCamelCase__ : str = char
return pairs
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[Any] , ):
UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
UpperCamelCase__ : Any = json.load(UpperCAmelCase_)
UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ : Any = errors # how to handle errors in decoding
UpperCamelCase__ : Tuple = bytes_to_unicode()
UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle:
UpperCamelCase__ : List[Any] = merges_handle.read().split('\n')[1:-1]
UpperCamelCase__ : List[Any] = [tuple(merge.split()) for merge in bpe_merges]
UpperCamelCase__ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Dict = {}
UpperCamelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase__ : Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple):
return len(self.encoder)
def __UpperCamelCase ( self : Tuple):
return dict(self.encoder , **self.added_tokens_encoder)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]):
if token in self.cache:
return self.cache[token]
UpperCamelCase__ : Optional[int] = tuple(UpperCAmelCase_)
UpperCamelCase__ : int = get_pairs(UpperCAmelCase_)
if not pairs:
return token
while True:
UpperCamelCase__ : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCAmelCase_ , float('inf')))
if bigram not in self.bpe_ranks:
break
UpperCamelCase__, UpperCamelCase__ : Tuple = bigram
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Optional[int] = 0
while i < len(UpperCAmelCase_):
try:
UpperCamelCase__ : Tuple = word.index(UpperCAmelCase_ , UpperCAmelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCamelCase__ : Any = j
if word[i] == first and i < len(UpperCAmelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCamelCase__ : List[str] = tuple(UpperCAmelCase_)
UpperCamelCase__ : Dict = new_word
if len(UpperCAmelCase_) == 1:
break
else:
UpperCamelCase__ : Optional[int] = get_pairs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = word
return word
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any):
UpperCamelCase__ : Optional[Any] = []
for token in re.findall(self.pat , UpperCAmelCase_):
UpperCamelCase__ : Optional[int] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' '))
return bpe_tokens
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int]):
return self.decoder.get(UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int):
UpperCamelCase__ : int = ''.join(UpperCAmelCase_)
UpperCamelCase__ : Any = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
if not os.path.isdir(UpperCAmelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase__ : str = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n')
UpperCamelCase__ : str = 0
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!')
UpperCamelCase__ : List[Any] = token_index
writer.write(' '.join(bpe_tokens) + '\n')
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_)) + [1]
return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1]
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : Any = [self.sep_token_id]
UpperCamelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
UpperCamelCase__ : str = ' ' + text
return (text, kwargs)
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"):
UpperCamelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text)
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : int = self.encode(UpperCAmelCase_)
if len(UpperCAmelCase_) > self.model_max_length:
UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
return input_ids
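# Worked example of the pair extraction that drives the BPE merge loop above (a
# sketch): for the symbol tuple ('l', 'o', 'w') the helper yields
# {('l', 'o'), ('o', 'w')}, and the loop repeatedly merges whichever pair has the
# lowest rank in bpe_ranks until no ranked pair remains.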
| 6 | 0 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __lowercase (nn.Module ):
def __init__( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : str = "geglu" , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : str = "layer_norm" , UpperCAmelCase_ : bool = False , ):
super().__init__()
UpperCamelCase__ : Tuple = only_cross_attention
UpperCamelCase__ : List[Any] = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
UpperCamelCase__ : Any = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'
F' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.')
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
UpperCamelCase__ : str = AdaLayerNorm(__lowerCamelCase , __lowerCamelCase)
elif self.use_ada_layer_norm_zero:
UpperCamelCase__ : Union[str, Any] = AdaLayerNormZero(__lowerCamelCase , __lowerCamelCase)
else:
UpperCamelCase__ : Tuple = nn.LayerNorm(__lowerCamelCase , elementwise_affine=__lowerCamelCase)
UpperCamelCase__ : Optional[int] = Attention(
query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , dropout=__lowerCamelCase , bias=__lowerCamelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__lowerCamelCase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
UpperCamelCase__ : Any = (
AdaLayerNorm(__lowerCamelCase , __lowerCamelCase)
if self.use_ada_layer_norm
else nn.LayerNorm(__lowerCamelCase , elementwise_affine=__lowerCamelCase)
)
UpperCamelCase__ : int = Attention(
query_dim=__lowerCamelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__lowerCamelCase , dim_head=__lowerCamelCase , dropout=__lowerCamelCase , bias=__lowerCamelCase , upcast_attention=__lowerCamelCase , ) # is self-attn if encoder_hidden_states is none
else:
UpperCamelCase__ : Union[str, Any] = None
UpperCamelCase__ : str = None
# 3. Feed-forward
UpperCamelCase__ : Dict = nn.LayerNorm(__lowerCamelCase , elementwise_affine=__lowerCamelCase)
UpperCamelCase__ : int = FeedForward(__lowerCamelCase , dropout=__lowerCamelCase , activation_fn=__lowerCamelCase , final_dropout=__lowerCamelCase)
# let chunk size default to None
UpperCamelCase__ : str = None
UpperCamelCase__ : Any = 0
def __UpperCamelCase ( self : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int):
# Sets chunk feed-forward
UpperCamelCase__ : Tuple = chunk_size
UpperCamelCase__ : Union[str, Any] = dim
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[torch.LongTensor] = None , UpperCAmelCase_ : Dict[str, Any] = None , UpperCAmelCase_ : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
UpperCamelCase__ : Tuple = self.norma(__lowerCamelCase , __lowerCamelCase)
elif self.use_ada_layer_norm_zero:
UpperCamelCase__ : Dict = self.norma(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hidden_dtype=hidden_states.dtype)
else:
UpperCamelCase__ : Union[str, Any] = self.norma(__lowerCamelCase)
UpperCamelCase__ : Optional[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
UpperCamelCase__ : Dict = self.attna(
__lowerCamelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__lowerCamelCase , **__lowerCamelCase , )
if self.use_ada_layer_norm_zero:
UpperCamelCase__ : Dict = gate_msa.unsqueeze(1) * attn_output
UpperCamelCase__ : Optional[Any] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
UpperCamelCase__ : Optional[Any] = (
self.norma(__lowerCamelCase , __lowerCamelCase) if self.use_ada_layer_norm else self.norma(__lowerCamelCase)
)
UpperCamelCase__ : int = self.attna(
__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=__lowerCamelCase , **__lowerCamelCase , )
UpperCamelCase__ : List[Any] = attn_output + hidden_states
# 3. Feed-forward
UpperCamelCase__ : Union[str, Any] = self.norma(__lowerCamelCase)
if self.use_ada_layer_norm_zero:
UpperCamelCase__ : Dict = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.')
UpperCamelCase__ : str = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
UpperCamelCase__ : Dict = torch.cat(
[self.ff(__lowerCamelCase) for hid_slice in norm_hidden_states.chunk(__lowerCamelCase , dim=self._chunk_dim)] , dim=self._chunk_dim , )
else:
UpperCamelCase__ : List[str] = self.ff(__lowerCamelCase)
if self.use_ada_layer_norm_zero:
UpperCamelCase__ : Optional[Any] = gate_mlp.unsqueeze(1) * ff_output
UpperCamelCase__ : Union[str, Any] = ff_output + hidden_states
return hidden_states
class __lowercase (nn.Module ):
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 4 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : str = "geglu" , UpperCAmelCase_ : bool = False , ):
super().__init__()
UpperCamelCase__ : List[str] = int(dim * mult)
UpperCamelCase__ : Tuple = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
UpperCamelCase__ : List[str] = GELU(__lowerCamelCase , __lowerCamelCase)
if activation_fn == "gelu-approximate":
UpperCamelCase__ : str = GELU(__lowerCamelCase , __lowerCamelCase , approximate='tanh')
elif activation_fn == "geglu":
UpperCamelCase__ : Any = GEGLU(__lowerCamelCase , __lowerCamelCase)
elif activation_fn == "geglu-approximate":
UpperCamelCase__ : Optional[int] = ApproximateGELU(__lowerCamelCase , __lowerCamelCase)
UpperCamelCase__ : Any = nn.ModuleList([])
# project in
self.net.append(__lowerCamelCase)
# project dropout
self.net.append(nn.Dropout(__lowerCamelCase))
# project out
self.net.append(nn.Linear(__lowerCamelCase , __lowerCamelCase))
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(__lowerCamelCase))
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int]):
for module in self.net:
UpperCamelCase__ : Optional[int] = module(__lowerCamelCase)
return hidden_states
class __lowercase (nn.Module ):
def __init__( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str = "none"):
super().__init__()
UpperCamelCase__ : List[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase)
UpperCamelCase__ : int = approximate
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : str):
if gate.device.type != "mps":
return F.gelu(__lowerCamelCase , approximate=self.approximate)
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa) , approximate=self.approximate).to(dtype=gate.dtype)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str):
UpperCamelCase__ : List[Any] = self.proj(__lowerCamelCase)
UpperCamelCase__ : Optional[int] = self.gelu(__lowerCamelCase)
return hidden_states
class __lowercase (nn.Module ):
def __init__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
super().__init__()
UpperCamelCase__ : str = nn.Linear(__lowerCamelCase , dim_out * 2)
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Union[str, Any]):
if gate.device.type != "mps":
return F.gelu(__lowerCamelCase)
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa)).to(dtype=gate.dtype)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : Union[str, Any] = self.proj(__lowerCamelCase).chunk(2 , dim=-1)
return hidden_states * self.gelu(__lowerCamelCase)
class __lowercase (nn.Module ):
def __init__( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
super().__init__()
UpperCamelCase__ : int = nn.Linear(__lowerCamelCase , __lowerCamelCase)
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any]):
UpperCamelCase__ : Tuple = self.proj(__lowerCamelCase)
return x * torch.sigmoid(1.7_02 * x)
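# The line above is the common 'quick GELU' approximation, x * sigmoid(1.702 * x).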
class __lowercase (nn.Module ):
def __init__( self : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int]):
super().__init__()
UpperCamelCase__ : List[str] = nn.Embedding(__lowerCamelCase , __lowerCamelCase)
UpperCamelCase__ : Dict = nn.SiLU()
UpperCamelCase__ : Any = nn.Linear(__lowerCamelCase , embedding_dim * 2)
UpperCamelCase__ : Optional[Any] = nn.LayerNorm(__lowerCamelCase , elementwise_affine=__lowerCamelCase)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
UpperCamelCase__ : Optional[int] = self.linear(self.silu(self.emb(__lowerCamelCase)))
UpperCamelCase__ : str = torch.chunk(__lowerCamelCase , 2)
UpperCamelCase__ : Optional[int] = self.norm(__lowerCamelCase) * (1 + scale) + shift
return x
class __lowercase (nn.Module ):
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]):
super().__init__()
UpperCamelCase__ : Dict = CombinedTimestepLabelEmbeddings(__lowerCamelCase , __lowerCamelCase)
UpperCamelCase__ : Dict = nn.SiLU()
UpperCamelCase__ : Any = nn.Linear(__lowerCamelCase , 6 * embedding_dim , bias=__lowerCamelCase)
UpperCamelCase__ : Tuple = nn.LayerNorm(__lowerCamelCase , elementwise_affine=__lowerCamelCase , eps=1e-6)
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=None):
UpperCamelCase__ : Union[str, Any] = self.linear(self.silu(self.emb(__lowerCamelCase , __lowerCamelCase , hidden_dtype=__lowerCamelCase)))
UpperCamelCase__ : Optional[Any] = emb.chunk(6 , dim=1)
UpperCamelCase__ : int = self.norm(__lowerCamelCase) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __lowercase (nn.Module ):
def __init__( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : float = 1e-5):
super().__init__()
UpperCamelCase__ : Dict = num_groups
UpperCamelCase__ : List[str] = eps
if act_fn is None:
UpperCamelCase__ : List[str] = None
else:
UpperCamelCase__ : Optional[int] = get_activation(__lowerCamelCase)
UpperCamelCase__ : List[Any] = nn.Linear(__lowerCamelCase , out_dim * 2)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int):
if self.act:
UpperCamelCase__ : int = self.act(__lowerCamelCase)
UpperCamelCase__ : Optional[int] = self.linear(__lowerCamelCase)
UpperCamelCase__ : Any = emb[:, :, None, None]
UpperCamelCase__ : Union[str, Any] = emb.chunk(2 , dim=1)
UpperCamelCase__ : Optional[Any] = F.group_norm(__lowerCamelCase , self.num_groups , eps=self.eps)
UpperCamelCase__ : Any = x * (1 + scale) + shift
return x
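# A standalone sketch of the chunked feed-forward trick used in the transformer
# block above: split the chunked dimension into equal pieces, run the MLP on each
# piece, and concatenate the results, trading a little speed for lower peak
# activation memory. Illustration only, not the diffusers API.
import torch

def chunked_feed_forward(ff, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int) -> torch.Tensor:
    if hidden_states.shape[chunk_dim] % chunk_size != 0:
        raise ValueError('chunked dimension must be divisible by chunk_size')
    num_chunks = hidden_states.shape[chunk_dim] // chunk_size
    return torch.cat([ff(chunk) for chunk in hidden_states.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim)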
| 716 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def __UpperCAmelCase ( lowerCamelCase_ = "AAPL") -> str:
UpperCamelCase__ : str = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'
UpperCamelCase__ : Optional[Any] = BeautifulSoup(requests.get(lowerCamelCase_).text , 'html.parser')
UpperCamelCase__ : Union[str, Any] = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 6 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class __lowercase (_UpperCAmelCase ):
_lowerCamelCase = '''dpr'''
def __init__( self : Optional[int] , UpperCAmelCase_ : Dict=30_522 , UpperCAmelCase_ : Any=768 , UpperCAmelCase_ : List[Any]=12 , UpperCAmelCase_ : str=12 , UpperCAmelCase_ : Union[str, Any]=3_072 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=512 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[Any]=1e-12 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : Any="absolute" , UpperCAmelCase_ : int = 0 , **UpperCAmelCase_ : Tuple , ):
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase__ : Union[str, Any] = vocab_size
UpperCamelCase__ : Any = hidden_size
UpperCamelCase__ : int = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : Optional[Any] = hidden_act
UpperCamelCase__ : Dict = intermediate_size
UpperCamelCase__ : int = hidden_dropout_prob
UpperCamelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCamelCase__ : List[str] = max_position_embeddings
UpperCamelCase__ : Optional[int] = type_vocab_size
UpperCamelCase__ : Any = initializer_range
UpperCamelCase__ : List[str] = layer_norm_eps
UpperCamelCase__ : List[Any] = projection_dim
UpperCamelCase__ : List[Any] = position_embedding_type
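# Hedged usage sketch (assumes the standard `transformers.DPRConfig` /
# `DPRQuestionEncoder` exports that the mangled class above mirrors):
#
#   from transformers import DPRConfig, DPRQuestionEncoder
#   config = DPRConfig(projection_dim=128)  # adds a 128-d projection over the pooled output
#   model = DPRQuestionEncoder(config)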
| 717 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
model.to(UpperCAmelCase_)
from datasets import load_dataset
UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo')
UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB')
UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_)
UpperCamelCase__ : Tuple = outputs.logits
UpperCamelCase__ : str = torch.Size((1, 16))
self.assertEqual(logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Tuple = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
| 6 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_) -> str:
UpperCamelCase__ : List[Any] = int(_A)
if decimal in (0, 1): # Exit cases for the recursion
return str(_A)
UpperCamelCase__, UpperCamelCase__ : int = divmod(_A , 2)
return binary_recursive(_A) + str(_A)
def __UpperCAmelCase ( lowerCamelCase_) -> str:
UpperCamelCase__ : List[str] = str(_A).strip()
if not number:
raise ValueError('No input value was provided')
UpperCamelCase__ : Any = '-' if number.startswith('-') else ''
UpperCamelCase__ : Dict = number.lstrip('-')
if not number.isnumeric():
raise ValueError('Input value is not an integer')
return f'{negative}0b{binary_recursive(int(_A))}'
if __name__ == "__main__":
from doctest import testmod
testmod()
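# Worked example (a sketch): for input '7', divmod(7, 2) -> (3, 1) and
# divmod(3, 2) -> (1, 1), with 1 as the base case, so the recursion concatenates
# '1' + '1' + '1' and the wrapper returns '0b111'; a leading '-' is preserved,
# e.g. '-2' -> '-0b10'.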
| 718 |
'''simple docstring'''
import argparse
import struct
import unittest
class __lowercase :
def __init__( self : Tuple , UpperCAmelCase_ : bytes):
UpperCamelCase__ : Dict = data
# Initialize hash values
UpperCamelCase__ : Any = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
UpperCamelCase__ : List[Any] = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
UpperCamelCase__ : Tuple = self.preprocessing(self.data)
self.final_hash()
@staticmethod
def __UpperCamelCase ( UpperCAmelCase_ : bytes):
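        # SHA-256 padding: append 0x80, zero-pad to 56 bytes mod 64, then append the message bit-length as a big-endian 64-bit integer.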
UpperCamelCase__ : List[Any] = B'\x80' + (B'\x00' * (63 - (len(UpperCAmelCase_) + 8) % 64))
UpperCamelCase__ : List[Any] = struct.pack('>Q' , (len(UpperCAmelCase_) * 8))
return data + padding + big_endian_integer
def __UpperCamelCase ( self : Union[str, Any]):
# Convert into blocks of 64 bytes
UpperCamelCase__ : int = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data) , 64)
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCamelCase__ : Tuple = list(struct.unpack('>16L' , UpperCAmelCase_))
            # extend the message schedule with 48 zeroed words (filled in below)
words += [0] * 48
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = self.hashes
for index in range(0 , 64):
if index > 15:
# modify the zero-ed indexes at the end of the array
UpperCamelCase__ : Dict = (
self.ror(words[index - 15] , 7)
^ self.ror(words[index - 15] , 18)
^ (words[index - 15] >> 3)
)
UpperCamelCase__ : Tuple = (
self.ror(words[index - 2] , 17)
^ self.ror(words[index - 2] , 19)
^ (words[index - 2] >> 10)
)
UpperCamelCase__ : int = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X100_000_000
# Compression
UpperCamelCase__ : Optional[Any] = self.ror(UpperCAmelCase_ , 6) ^ self.ror(UpperCAmelCase_ , 11) ^ self.ror(UpperCAmelCase_ , 25)
UpperCamelCase__ : List[str] = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g)
UpperCamelCase__ : List[Any] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X100_000_000
UpperCamelCase__ : List[str] = self.ror(UpperCAmelCase_ , 2) ^ self.ror(UpperCAmelCase_ , 13) ^ self.ror(UpperCAmelCase_ , 22)
UpperCamelCase__ : Dict = (a & b) ^ (a & c) ^ (b & c)
UpperCamelCase__ : List[str] = (sa + maj) % 0X100_000_000
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Tuple = (
g,
f,
e,
((d + tempa) % 0X100_000_000),
c,
b,
a,
((tempa + tempa) % 0X100_000_000),
)
UpperCamelCase__ : List[Any] = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCamelCase__ : Optional[Any] = [
((element + mutated_hash_values[index]) % 0X100_000_000)
for index, element in enumerate(self.hashes)
]
UpperCamelCase__ : Any = ''.join([hex(UpperCAmelCase_)[2:].zfill(8) for value in self.hashes])
def __UpperCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
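        # Right-rotate a 32-bit word by the given number of bits, masking the result back to 32 bits.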
return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : int):
import hashlib
UpperCamelCase__ : str = bytes('Test String' , 'utf-8')
self.assertEqual(SHAaaa(UpperCAmelCase_).hash , hashlib.shaaaa(UpperCAmelCase_).hexdigest())
def __UpperCAmelCase ( ) -> None:
import doctest
doctest.testmod()
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file')
UpperCamelCase__ : List[str] = parser.parse_args()
UpperCamelCase__ : str = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb') as f:
UpperCamelCase__ : Any = f.read()
else:
UpperCamelCase__ : List[Any] = bytes(lowerCamelCase_ , 'utf-8')
print(SHAaaa(lowerCamelCase_).hash)
if __name__ == "__main__":
main()
| 6 | 0 |
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowerCAmelCase__ = get_logger(__name__)
lowerCAmelCase__ = Path(__file__).parent / """model_card_template.md"""
lowerCAmelCase__ = uuida().hex
lowerCAmelCase__ = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase__ = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase__ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def __UpperCAmelCase ( lowerCamelCase_ = None) -> str:
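    # Build the HTTP user-agent string, advertising installed framework versions unless telemetry is disabled.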
UpperCamelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'; torch/{_torch_version}'
if is_flax_available():
ua += f'; jax/{_jax_version}'
ua += f'; flax/{_flax_version}'
if is_onnx_available():
ua += f'; onnxruntime/{_onnxruntime_version}'
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' , '').upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(lowerCamelCase_ , lowerCamelCase_):
ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items())
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
ua += "; " + user_agent
return ua
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None) -> Union[str, Any]:
if token is None:
UpperCamelCase__ : Union[str, Any] = HfFolder.get_token()
if organization is None:
UpperCamelCase__ : Optional[Any] = whoami(lowerCamelCase_)['name']
return f'{username}/{model_id}'
else:
return f'{organization}/{model_id}'
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any:
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.')
if hasattr(lowerCamelCase_ , 'local_rank') and args.local_rank not in [-1, 0]:
return
UpperCamelCase__ : Tuple = args.hub_token if hasattr(lowerCamelCase_ , 'hub_token') else None
UpperCamelCase__ : Union[str, Any] = get_full_repo_name(lowerCamelCase_ , token=lowerCamelCase_)
UpperCamelCase__ : List[Any] = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=lowerCamelCase_ , model_name=lowerCamelCase_ , repo_name=lowerCamelCase_ , dataset_name=args.dataset_name if hasattr(lowerCamelCase_ , 'dataset_name') else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(lowerCamelCase_ , 'gradient_accumulation_steps') else None
) , adam_betaa=args.adam_betaa if hasattr(lowerCamelCase_ , 'adam_beta1') else None , adam_betaa=args.adam_betaa if hasattr(lowerCamelCase_ , 'adam_beta2') else None , adam_weight_decay=args.adam_weight_decay if hasattr(lowerCamelCase_ , 'adam_weight_decay') else None , adam_epsilon=args.adam_epsilon if hasattr(lowerCamelCase_ , 'adam_epsilon') else None , lr_scheduler=args.lr_scheduler if hasattr(lowerCamelCase_ , 'lr_scheduler') else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(lowerCamelCase_ , 'lr_warmup_steps') else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(lowerCamelCase_ , 'ema_inv_gamma') else None , ema_power=args.ema_power if hasattr(lowerCamelCase_ , 'ema_power') else None , ema_max_decay=args.ema_max_decay if hasattr(lowerCamelCase_ , 'ema_max_decay') else None , mixed_precision=args.mixed_precision , )
UpperCamelCase__ : List[str] = os.path.join(args.output_dir , 'README.md')
model_card.save(lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = None) -> str:
if resolved_file is None or commit_hash is not None:
return commit_hash
UpperCamelCase__ : Union[str, Any] = str(Path(lowerCamelCase_).as_posix())
UpperCamelCase__ : List[Any] = re.search(R'snapshots/([^/]+)/' , lowerCamelCase_)
if search is None:
return None
UpperCamelCase__ : str = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(lowerCamelCase_) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowerCAmelCase__ = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
lowerCAmelCase__ = os.path.join(hf_cache_home, 'diffusers')
def __UpperCAmelCase ( lowerCamelCase_ = None , lowerCamelCase_ = None) -> None:
if new_cache_dir is None:
UpperCamelCase__ : Optional[Any] = DIFFUSERS_CACHE
if old_cache_dir is None:
UpperCamelCase__ : str = old_diffusers_cache
UpperCamelCase__ : Optional[int] = Path(lowerCamelCase_).expanduser()
UpperCamelCase__ : Any = Path(lowerCamelCase_).expanduser()
for old_blob_path in old_cache_dir.glob('**/blobs/*'):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
UpperCamelCase__ : str = new_cache_dir / old_blob_path.relative_to(lowerCamelCase_)
new_blob_path.parent.mkdir(parents=lowerCamelCase_ , exist_ok=lowerCamelCase_)
os.replace(lowerCamelCase_ , lowerCamelCase_)
try:
os.symlink(lowerCamelCase_ , lowerCamelCase_)
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowerCAmelCase__ = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
lowerCAmelCase__ = 0
else:
with open(cache_version_file) as f:
try:
lowerCAmelCase__ = int(f.read())
except ValueError:
lowerCAmelCase__ = 0
if cache_version < 1:
lowerCAmelCase__ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
lowerCAmelCase__ = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'the directory exists and can be written to.'
)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = None) -> str:
if variant is not None:
UpperCamelCase__ : Any = weights_name.split('.')
UpperCamelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:]
UpperCamelCase__ : Optional[Any] = '.'.join(lowerCamelCase_)
return weights_name
def __UpperCAmelCase ( lowerCamelCase_ , *,
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , ) -> Optional[Any]:
UpperCamelCase__ : str = str(lowerCamelCase_)
if os.path.isfile(lowerCamelCase_):
return pretrained_model_name_or_path
elif os.path.isdir(lowerCamelCase_):
if os.path.isfile(os.path.join(lowerCamelCase_ , lowerCamelCase_)):
# Load from a PyTorch checkpoint
UpperCamelCase__ : Optional[Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_)
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)):
UpperCamelCase__ : List[Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
return model_file
else:
raise EnvironmentError(
f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.')
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(lowerCamelCase_).base_version) >= version.parse('0.20.0')
):
try:
UpperCamelCase__ : List[Any] = hf_hub_download(
lowerCamelCase_ , filename=_add_variant(lowerCamelCase_ , lowerCamelCase_) , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , user_agent=lowerCamelCase_ , subfolder=lowerCamelCase_ , revision=revision or commit_hash , )
warnings.warn(
f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , lowerCamelCase_ , )
return model_file
except: # noqa: E722
warnings.warn(
f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(lowerCamelCase_ , lowerCamelCase_)} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(lowerCamelCase_ , lowerCamelCase_)}\' so that the correct variant file can be added.' , lowerCamelCase_ , )
try:
# 2. Load model file as usual
UpperCamelCase__ : Dict = hf_hub_download(
lowerCamelCase_ , filename=lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , user_agent=lowerCamelCase_ , subfolder=lowerCamelCase_ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.')
except RevisionNotFoundError:
raise EnvironmentError(
f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
'this model name. Check the model page at '
f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.')
except EntryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.')
except HTTPError as err:
raise EnvironmentError(
f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}')
except ValueError:
raise EnvironmentError(
f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
f' directory containing a file named {weights_name} or'
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.')
except EnvironmentError:
raise EnvironmentError(
f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
f'containing a file named {weights_name}') | 719 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase__ = 300 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> float:
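    # Built-in potential of a p-n junction: V_bi = (k_B * T / q) * ln(N_d * N_a / n_i**2).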
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive')
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive')
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive')
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration')
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration')
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class __lowercase (SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = '''nllb-moe'''
_lowerCamelCase = ['''past_key_values''']
_lowerCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : str , UpperCAmelCase_ : Union[str, Any]=128_112 , UpperCAmelCase_ : List[Any]=1_024 , UpperCAmelCase_ : str=12 , UpperCAmelCase_ : List[Any]=4_096 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Union[str, Any]=4_096 , UpperCAmelCase_ : List[str]=16 , UpperCAmelCase_ : int=0.05 , UpperCAmelCase_ : Tuple=0.05 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[int]="relu" , UpperCAmelCase_ : Any=1_024 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Union[str, Any]="float32" , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[Any]=128 , UpperCAmelCase_ : Union[str, Any]=64 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Tuple=0.0_01 , UpperCAmelCase_ : Optional[int]=0.0_01 , UpperCAmelCase_ : Tuple="all" , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : str=1.0 , UpperCAmelCase_ : Any=0.2 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Optional[int]=False , **UpperCAmelCase_ : Union[str, Any] , ):
UpperCamelCase__ : str = vocab_size
UpperCamelCase__ : Any = max_position_embeddings
UpperCamelCase__ : str = d_model
UpperCamelCase__ : List[Any] = encoder_ffn_dim
UpperCamelCase__ : Optional[int] = encoder_layers
UpperCamelCase__ : Union[str, Any] = encoder_attention_heads
UpperCamelCase__ : List[Any] = decoder_ffn_dim
UpperCamelCase__ : Optional[Any] = decoder_layers
UpperCamelCase__ : Optional[Any] = decoder_attention_heads
UpperCamelCase__ : List[str] = dropout
UpperCamelCase__ : int = attention_dropout
UpperCamelCase__ : int = activation_dropout
UpperCamelCase__ : List[str] = activation_function
UpperCamelCase__ : Any = init_std
UpperCamelCase__ : str = encoder_layerdrop
UpperCamelCase__ : Optional[int] = decoder_layerdrop
UpperCamelCase__ : Optional[int] = use_cache
UpperCamelCase__ : Tuple = encoder_layers
UpperCamelCase__ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCamelCase__ : int = router_z_loss_coef
UpperCamelCase__ : Optional[int] = router_aux_loss_coef
UpperCamelCase__ : Union[str, Any] = decoder_sparse_step
UpperCamelCase__ : Any = encoder_sparse_step
UpperCamelCase__ : Optional[Any] = num_experts
UpperCamelCase__ : Union[str, Any] = expert_capacity
UpperCamelCase__ : Union[str, Any] = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}')
UpperCamelCase__ : Optional[int] = router_dtype
UpperCamelCase__ : Optional[Any] = router_ignore_padding_tokens
UpperCamelCase__ : Union[str, Any] = batch_prioritized_routing
UpperCamelCase__ : List[str] = second_expert_policy
UpperCamelCase__ : Optional[Any] = normalize_router_prob_before_dropping
UpperCamelCase__ : List[Any] = moe_eval_capacity_token_fraction
UpperCamelCase__ : List[Any] = moe_token_dropout
UpperCamelCase__ : Tuple = output_router_logits
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , **_lowercase , )
| 720 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCAmelCase ( lowerCamelCase_) -> List[Tuple[int, ...]]:
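    # Recursively collect the shapes of all tensors in a nested dict/list/tuple structure.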
UpperCamelCase__ : int = []
if isinstance(lowerCamelCase_ , lowerCamelCase_):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , (list, tuple)):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase_))
elif isinstance(lowerCamelCase_ , torch.Tensor):
shapes.append(tree.shape)
else:
raise ValueError('Not supported')
return shapes
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Tuple[int, ...]:
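    # Convert a flat index into the equivalent multi-dimensional index for the given dims.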
UpperCamelCase__ : int = []
for d in reversed(lowerCamelCase_):
idx.append(flat_idx % d)
UpperCamelCase__ : Any = flat_idx // d
return tuple(reversed(lowerCamelCase_))
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> List[Tuple[slice, ...]]:
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowerCamelCase_) -> None:
UpperCamelCase__ : Tuple = True
for i in range(len(lowerCamelCase_)):
UpperCamelCase__ : List[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCamelCase__ : Optional[Any] = l[reversed_idx]
if start_edges is None:
UpperCamelCase__ : int = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase_)
if end_edges is None:
UpperCamelCase__ : List[str] = [e == (d - 1) for e, d in zip(lowerCamelCase_ , lowerCamelCase_)]
reduce_edge_list(lowerCamelCase_)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase_) == 0:
return [()]
elif len(lowerCamelCase_) == 1:
return [(slice(start[0] , end[0] + 1),)]
UpperCamelCase__ : List[Tuple[slice, ...]] = []
UpperCamelCase__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase_ , lowerCamelCase_):
if s == e:
path_list.append(slice(lowerCamelCase_ , s + 1))
else:
break
UpperCamelCase__ : Tuple[slice, ...] = tuple(lowerCamelCase_)
UpperCamelCase__ : Dict = len(lowerCamelCase_)
# start == end, and we're done
if divergence_idx == len(lowerCamelCase_):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : str = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ))
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCamelCase__ : Optional[int] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase_ , edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
slices.extend(lower())
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper())
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper())
UpperCamelCase__ : Dict = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
slices.extend(lower())
return slices
@torch.jit.ignore
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> torch.Tensor:
UpperCamelCase__ : List[Any] = t.shape[:no_batch_dims]
UpperCamelCase__ : Optional[int] = list(_flat_idx_to_idx(lowerCamelCase_ , lowerCamelCase_))
# _get_minimal_slice_set is inclusive
UpperCamelCase__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase_))
# Get an ordered list of slices to perform
UpperCamelCase__ : int = _get_minimal_slice_set(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
UpperCamelCase__ : List[Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Any:
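    # Apply the layer over the flattened batch dimensions in chunks of chunk_size to bound peak memory, then reassemble the outputs.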
if not (len(lowerCamelCase_) > 0):
raise ValueError('Must provide at least one input')
UpperCamelCase__ : int = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase_)]
UpperCamelCase__ : int = tuple([max(lowerCamelCase_) for s in zip(*lowerCamelCase_)])
def _prep_inputs(lowerCamelCase_) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
UpperCamelCase__ : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
UpperCamelCase__ : Optional[int] = t.reshape(-1 , *t.shape[no_batch_dims:])
else:
UpperCamelCase__ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
return t
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase_)
UpperCamelCase__ : int = None
if _out is not None:
UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
UpperCamelCase__ : Dict = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCamelCase__ : int = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowerCamelCase_) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[Any] = prepped_outputs
for _ in range(lowerCamelCase_):
# Chunk the input
if not low_mem:
UpperCamelCase__ : str = _select_chunk
else:
UpperCamelCase__ : List[Any] = partial(
_chunk_slice , flat_start=lowerCamelCase_ , flat_end=min(lowerCamelCase_ , i + chunk_size) , no_batch_dims=len(lowerCamelCase_) , )
UpperCamelCase__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase_ , lowerCamelCase_)
# Run the layer on the chunk
UpperCamelCase__ : List[Any] = layer(**lowerCamelCase_)
# Allocate space for the output
if out is None:
UpperCamelCase__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase_: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , lowerCamelCase_)
# Put the chunk in its pre-allocated space
if isinstance(lowerCamelCase_ , lowerCamelCase_):
def assign(lowerCamelCase_ , lowerCamelCase_) -> None:
for k, v in da.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_):
assign(lowerCamelCase_ , da[k])
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCamelCase__ : List[str] = da[k]
assign(lowerCamelCase_ , lowerCamelCase_)
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
for xa, xa in zip(lowerCamelCase_ , lowerCamelCase_):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCamelCase__ : int = xa
elif isinstance(lowerCamelCase_ , torch.Tensor):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCamelCase__ : Dict = output_chunk
else:
raise ValueError('Not supported')
i += chunk_size
UpperCamelCase__ : int = tensor_tree_map(lambda lowerCamelCase_: t.view(orig_batch_dims + t.shape[1:]) , lowerCamelCase_)
return out
class __lowercase :
def __init__( self : List[str] , UpperCAmelCase_ : int = 512 , ):
UpperCamelCase__ : str = max_chunk_size
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[tuple] = None
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int):
logging.info('Tuning chunk size...')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
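        # Candidate chunk sizes are powers of two up to max_chunk_size; the bisection below finds the largest one that runs without raising.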
UpperCamelCase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
UpperCamelCase__ : List[Any] = [c for c in candidates if c > min_chunk_size]
UpperCamelCase__ : List[Any] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(UpperCAmelCase_ : int) -> bool:
try:
with torch.no_grad():
fn(*UpperCAmelCase_ , chunk_size=UpperCAmelCase_)
return True
except RuntimeError:
return False
UpperCamelCase__ : Tuple = 0
UpperCamelCase__ : Dict = len(UpperCAmelCase_) - 1
while i > min_viable_chunk_size_index:
UpperCamelCase__ : Optional[int] = test_chunk_size(candidates[i])
if not viable:
UpperCamelCase__ : Tuple = (min_viable_chunk_size_index + i) // 2
else:
UpperCamelCase__ : Optional[int] = i
UpperCamelCase__ : Dict = (i + len(UpperCAmelCase_) - 1) // 2
return candidates[min_viable_chunk_size_index]
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Iterable , UpperCAmelCase_ : Iterable):
UpperCamelCase__ : List[str] = True
for aa, aa in zip(UpperCAmelCase_ , UpperCAmelCase_):
assert type(UpperCAmelCase_) == type(UpperCAmelCase_)
if isinstance(UpperCAmelCase_ , (list, tuple)):
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])]
UpperCamelCase__ : str = [v for _, v in sorted(aa.items() , key=lambda UpperCAmelCase_: x[0])]
consistent &= self._compare_arg_caches(UpperCAmelCase_ , UpperCAmelCase_)
else:
consistent &= aa == aa
return consistent
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Callable , UpperCAmelCase_ : tuple , UpperCAmelCase_ : int , ):
UpperCamelCase__ : List[Any] = True
UpperCamelCase__ : tuple = tree_map(lambda UpperCAmelCase_: a.shape if isinstance(UpperCAmelCase_ , torch.Tensor) else a , UpperCAmelCase_ , UpperCAmelCase_)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , UpperCAmelCase_)
else:
            # No cached data yet; we have to tune from scratch
UpperCamelCase__ : Optional[int] = False
if not consistent:
UpperCamelCase__ : Tuple = self._determine_favorable_chunk_size(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 6 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class __lowercase (_lowercase ):
_lowerCamelCase = '''mra'''
def __init__( self : List[Any] , UpperCAmelCase_ : Tuple=50_265 , UpperCAmelCase_ : int=768 , UpperCAmelCase_ : Dict=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Tuple=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : List[Any]=1e-5 , UpperCAmelCase_ : Optional[Any]="absolute" , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Union[str, Any]="full" , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Any=2 , **UpperCAmelCase_ : Tuple , ):
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_)
UpperCamelCase__ : Union[str, Any] = vocab_size
UpperCamelCase__ : List[str] = max_position_embeddings
UpperCamelCase__ : str = hidden_size
UpperCamelCase__ : Optional[int] = num_hidden_layers
UpperCamelCase__ : List[Any] = num_attention_heads
UpperCamelCase__ : List[Any] = intermediate_size
UpperCamelCase__ : Tuple = hidden_act
UpperCamelCase__ : List[str] = hidden_dropout_prob
UpperCamelCase__ : Any = attention_probs_dropout_prob
UpperCamelCase__ : List[str] = initializer_range
UpperCamelCase__ : List[str] = type_vocab_size
UpperCamelCase__ : Any = layer_norm_eps
UpperCamelCase__ : Optional[Any] = position_embedding_type
UpperCamelCase__ : Any = block_per_row
UpperCamelCase__ : Optional[Any] = approx_mode
UpperCamelCase__ : List[Any] = initial_prior_first_n_blocks
UpperCamelCase__ : Any = initial_prior_diagonal_n_blocks
| 721 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[Any]):
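        # Write a tiny BPE vocab/merges pair and an image-processor config to a temp dir for the processor tests below.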
UpperCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
UpperCamelCase__ : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_slow.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_)
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_fast.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : int = self.prepare_image_inputs()
UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Any = 'lower newer'
UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[int] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'lower newer'
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_):
processor()
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Union[str, Any] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'lower newer'
UpperCamelCase__ : Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 6 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_) -> int:
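    # Generate ugly numbers (only prime factors 2, 3, 5) by repeatedly appending the smallest pending multiple.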
UpperCamelCase__ : str = [1]
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : Optional[Any] = 0, 0, 0
UpperCamelCase__ : int = ugly_nums[ia] * 2
UpperCamelCase__ : str = ugly_nums[ia] * 3
UpperCamelCase__ : List[str] = ugly_nums[ia] * 5
for _ in range(1 , __SCREAMING_SNAKE_CASE):
UpperCamelCase__ : Tuple = min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
ugly_nums.append(__SCREAMING_SNAKE_CASE)
if next_num == next_a:
ia += 1
UpperCamelCase__ : Tuple = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
UpperCamelCase__ : Optional[Any] = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
UpperCamelCase__ : str = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
| 700 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ = "cpu" , lowerCamelCase_ = None) -> None:
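    # Load a saved state dict, cast each tensor to half precision, and save it back (overwriting src_path unless save_path is given).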
UpperCamelCase__ : List[Any] = torch.load(lowerCamelCase_ , map_location=lowerCamelCase_)
for k, v in tqdm(state_dict.items()):
if not isinstance(lowerCamelCase_ , torch.Tensor):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
UpperCamelCase__ : int = v.half()
if save_path is None: # overwrite src_path
UpperCamelCase__ : List[Any] = src_path
torch.save(lowerCamelCase_ , lowerCamelCase_)
if __name__ == "__main__":
fire.Fire(convert)
| 6 | 0 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ = {
    'allenai/led-base-16384': 16_384,
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = LEDTokenizer
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Union[str, Any]="replace" , UpperCAmelCase_ : str="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : Tuple="<s>" , UpperCAmelCase_ : Optional[int]="<unk>" , UpperCAmelCase_ : Optional[Any]="<pad>" , UpperCAmelCase_ : Tuple="<mask>" , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Any=True , **UpperCAmelCase_ : List[Any] , ):
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ , **UpperCAmelCase_ , )
UpperCamelCase__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , UpperCAmelCase_) != add_prefix_space:
UpperCamelCase__ : Any = getattr(UpperCAmelCase_ , pre_tok_state.pop('type'))
UpperCamelCase__ : Optional[int] = add_prefix_space
UpperCamelCase__ : Union[str, Any] = pre_tok_class(**UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCamelCase__ : str = 'post_processor'
UpperCamelCase__ : str = getattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_)
if tokenizer_component_instance:
UpperCamelCase__ : Optional[int] = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase__ : int = tuple(state['sep'])
if "cls" in state:
UpperCamelCase__ : Tuple = tuple(state['cls'])
UpperCamelCase__ : Optional[Any] = False
if state.get('add_prefix_space' , UpperCAmelCase_) != add_prefix_space:
UpperCamelCase__ : int = add_prefix_space
UpperCamelCase__ : Union[str, Any] = True
if state.get('trim_offsets' , UpperCAmelCase_) != trim_offsets:
UpperCamelCase__ : List[str] = trim_offsets
UpperCamelCase__ : Tuple = True
if changes_to_apply:
UpperCamelCase__ : List[str] = getattr(UpperCAmelCase_ , state.pop('type'))
UpperCamelCase__ : Any = component_class(**UpperCAmelCase_)
setattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_)
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __UpperCamelCase ( self : Dict):
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[Any]):
UpperCamelCase__ : str = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else value
UpperCamelCase__ : Optional[int] = value
def __UpperCamelCase ( self : List[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[str]):
UpperCamelCase__ : str = kwargs.get('is_split_into_words' , UpperCAmelCase_)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_)
def __UpperCamelCase ( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : List[str] = kwargs.get('is_split_into_words' , UpperCAmelCase_)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_)
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any = None):
UpperCamelCase__ : List[str] = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_)
return tuple(UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str]=None):
UpperCamelCase__ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple = None):
UpperCamelCase__ : Optional[Any] = [self.sep_token_id]
UpperCamelCase__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str = None , UpperCAmelCase_ : Union[str, Any] = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase_ : str = None , UpperCAmelCase_ : int = None , ):
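        # Pad normally first, then extend global_attention_mask with -1 (local attention) so it matches the padded sequence length.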
UpperCamelCase__ : int = super()._pad(
encoded_inputs=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding_strategy=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , )
# Load from model defaults
if return_attention_mask is None:
UpperCamelCase__ : Union[str, Any] = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCamelCase__ : Dict = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCamelCase__ : Union[str, Any] = len(encoded_inputs['global_attention_mask']) != len(UpperCAmelCase_)
if needs_to_be_padded:
UpperCamelCase__ : int = len(UpperCAmelCase_) - len(encoded_inputs['global_attention_mask'])
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCamelCase__ : List[str] = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
UpperCamelCase__ : Any = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side))
return encoded_inputs
| 701 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''segformer'''
def __init__( self : Tuple , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Tuple=[2, 2, 2, 2] , UpperCAmelCase_ : List[str]=[8, 4, 2, 1] , UpperCAmelCase_ : Union[str, Any]=[32, 64, 160, 256] , UpperCAmelCase_ : Any=[7, 3, 3, 3] , UpperCAmelCase_ : Any=[4, 2, 2, 2] , UpperCAmelCase_ : Union[str, Any]=[1, 2, 5, 8] , UpperCAmelCase_ : Tuple=[4, 4, 4, 4] , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=1e-6 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[int]=255 , **UpperCAmelCase_ : Tuple , ):
super().__init__(**UpperCAmelCase_)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , UpperCAmelCase_ , )
UpperCamelCase__ : List[Any] = num_channels
UpperCamelCase__ : Any = num_encoder_blocks
UpperCamelCase__ : Dict = depths
UpperCamelCase__ : int = sr_ratios
UpperCamelCase__ : str = hidden_sizes
UpperCamelCase__ : List[str] = patch_sizes
UpperCamelCase__ : Optional[int] = strides
UpperCamelCase__ : Dict = mlp_ratios
UpperCamelCase__ : List[str] = num_attention_heads
UpperCamelCase__ : int = hidden_act
UpperCamelCase__ : Any = hidden_dropout_prob
UpperCamelCase__ : str = attention_probs_dropout_prob
UpperCamelCase__ : List[str] = classifier_dropout_prob
UpperCamelCase__ : List[Any] = initializer_range
UpperCamelCase__ : Union[str, Any] = drop_path_rate
UpperCamelCase__ : int = layer_norm_eps
UpperCamelCase__ : Dict = decoder_hidden_size
UpperCamelCase__ : List[Any] = kwargs.get('reshape_last_stage' , UpperCAmelCase_)
UpperCamelCase__ : List[str] = semantic_loss_ignore_index
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self : Optional[Any]):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __UpperCamelCase ( self : Optional[Any]):
return 1e-4
@property
def __UpperCamelCase ( self : Any):
return 12
| 6 | 0 |
'''simple docstring'''
import string
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
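    # Mirror each letter across the alphabet: A<->Z pairs sum to 155 in ASCII, a<->z pairs to 219; other characters pass through.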
UpperCamelCase__ : Union[str, Any] = ""
for i in sequence:
UpperCamelCase__ : int = ord(snake_case_)
if 65 <= extract <= 90:
output += chr(155 - extract)
elif 97 <= extract <= 122:
output += chr(219 - extract)
else:
output += i
return output
def __UpperCAmelCase ( lowerCamelCase_) -> List[Any]:
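    # Same cipher via a reversed-alphabet lookup table instead of per-character arithmetic.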
UpperCamelCase__ : str = string.ascii_letters
UpperCamelCase__ : Optional[int] = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(snake_case_)] if c in letters else c for c in sequence)
def __UpperCAmelCase ( ) -> Optional[Any]:
from timeit import timeit
print('Running performance benchmarks...')
UpperCamelCase__ : str = "from string import printable ; from __main__ import atbash, atbash_slow"
print(f'> atbash_slow(): {timeit("atbash_slow(printable)" , setup=snake_case_)} seconds')
print(f'> atbash(): {timeit("atbash(printable)" , setup=snake_case_)} seconds')
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 702 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> list[str]:
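    # Slide a window of length ngram_size across the sentence and collect every substring.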
return [sentence[i : i + ngram_size] for i in range(len(lowerCamelCase_) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 6 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
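    # n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a positive integer.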
UpperCamelCase__ : int = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def __UpperCAmelCase ( lowerCamelCase_ = 5_000) -> Dict:
UpperCamelCase__ : List[Any] = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCamelCase)]
for i, pentagonal_i in enumerate(_lowerCamelCase):
for j in range(_lowerCamelCase , len(_lowerCamelCase)):
UpperCamelCase__ : Any = pentagonal_nums[j]
UpperCamelCase__ : int = pentagonal_i + pentagonal_j
UpperCamelCase__ : Optional[Any] = pentagonal_j - pentagonal_i
if is_pentagonal(_lowerCamelCase) and is_pentagonal(_lowerCamelCase):
return b
return -1
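# Quick sanity sketch: 22 is the 4th pentagonal number (4 * (3*4 - 1) / 2),
# so is_pentagonal(22) is True while is_pentagonal(23) is False.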
if __name__ == "__main__":
print(f'''{solution() = }''')
| 703 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector , vector)
class SVC:
    def __init__( self , *,
        regularization: float = np.inf , kernel: str = "linear" , gamma: float = 0.0 , ):
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma')
            if not isinstance(self.gamma , (float, int)):
                raise ValueError('gamma must be float or int')
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f'Unknown kernel: {kernel}'
            raise ValueError(msg)
    def __linear( self , vectora: ndarray , vectorb: ndarray):
        return np.dot(vectora , vectorb)

    def __rbf( self , vectora: ndarray , vectorb: ndarray):
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb)))
    def fit( self , observations: list[ndarray] , classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] , observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes , 0 , 0)
        l_bounds = Bounds(0 , self.regularization)
        l_star = minimize(
            to_minimize , np.ones(n) , bounds=l_bounds , constraints=[ly_contraint]).x
        self.optimum = l_star
# calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j])
        self.offset = s / n
    def predict( self , observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
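# Minimal usage sketch (synthetic data, mirroring the file's original doctest):
#     xs = [np.asarray((0.0, 1.0)), np.asarray((0.0, 2.0)),
#           np.asarray((1.0, 1.0)), np.asarray((1.0, 2.0))]
#     ys = np.asarray((1, 1, -1, -1))
#     svc = SVC(kernel='linear')
#     svc.fit(xs, ys)
#     svc.predict(np.asarray((0.0, 1.5)))   # expected: 1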
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase (unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Union[str, Any]):
        model = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there' , return_tensors='pt').input_ids
        labels = tokenizer('Hi I am' , return_tensors='pt').input_ids
        loss = model(input_ids.to(torch_device) , labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
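        # Note: mtf_score is the negated summed NLL of the target sequence —
        # the mean per-token loss multiplied back up by the target length.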
| 704 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __UpperCAmelCase ( lowerCamelCase_) -> Any:
UpperCamelCase__ : Dict = DPTConfig()
if "large" in checkpoint_url:
UpperCamelCase__ : List[str] = 1_024
UpperCamelCase__ : List[str] = 4_096
UpperCamelCase__ : Optional[int] = 24
UpperCamelCase__ : List[str] = 16
UpperCamelCase__ : List[str] = [5, 11, 17, 23]
UpperCamelCase__ : str = [256, 512, 1_024, 1_024]
UpperCamelCase__ : Union[str, Any] = (1, 384, 384)
if "ade" in checkpoint_url:
UpperCamelCase__ : int = True
UpperCamelCase__ : Optional[Any] = 150
UpperCamelCase__ : int = 'huggingface/label-files'
UpperCamelCase__ : List[Any] = 'ade20k-id2label.json'
UpperCamelCase__ : List[Any] = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset')) , 'r'))
UpperCamelCase__ : int = {int(lowerCamelCase_): v for k, v in idalabel.items()}
UpperCamelCase__ : Union[str, Any] = idalabel
UpperCamelCase__ : List[str] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ : Any = [1, 150, 480, 480]
return config, expected_shape
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
UpperCamelCase__ : Tuple = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_)
def __UpperCAmelCase ( lowerCamelCase_) -> Optional[Any]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.model' , 'dpt.encoder')
if "pretrained.model" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.model' , 'dpt.embeddings')
if "patch_embed" in name:
UpperCamelCase__ : Tuple = name.replace('patch_embed' , 'patch_embeddings')
if "pos_embed" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pos_embed' , 'position_embeddings')
if "attn.proj" in name:
UpperCamelCase__ : List[Any] = name.replace('attn.proj' , 'attention.output.dense')
if "proj" in name and "project" not in name:
UpperCamelCase__ : Optional[Any] = name.replace('proj' , 'projection')
if "blocks" in name:
UpperCamelCase__ : int = name.replace('blocks' , 'layer')
if "mlp.fc1" in name:
UpperCamelCase__ : int = name.replace('mlp.fc1' , 'intermediate.dense')
if "mlp.fc2" in name:
UpperCamelCase__ : Tuple = name.replace('mlp.fc2' , 'output.dense')
if "norm1" in name:
UpperCamelCase__ : List[Any] = name.replace('norm1' , 'layernorm_before')
if "norm2" in name:
UpperCamelCase__ : int = name.replace('norm2' , 'layernorm_after')
if "scratch.output_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('scratch.output_conv' , 'head')
if "scratch" in name:
UpperCamelCase__ : int = name.replace('scratch' , 'neck')
if "layer1_rn" in name:
UpperCamelCase__ : Optional[Any] = name.replace('layer1_rn' , 'convs.0')
if "layer2_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer2_rn' , 'convs.1')
if "layer3_rn" in name:
UpperCamelCase__ : List[Any] = name.replace('layer3_rn' , 'convs.2')
if "layer4_rn" in name:
UpperCamelCase__ : List[str] = name.replace('layer4_rn' , 'convs.3')
if "refinenet" in name:
UpperCamelCase__ : int = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCamelCase__ : Any = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4)}')
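        # e.g. "refinenet4" -> "fusion_stage.layers.0", "refinenet1" -> "fusion_stage.layers.3"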
if "out_conv" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('out_conv' , 'projection')
if "resConfUnit1" in name:
UpperCamelCase__ : int = name.replace('resConfUnit1' , 'residual_layer1')
if "resConfUnit2" in name:
UpperCamelCase__ : Optional[Any] = name.replace('resConfUnit2' , 'residual_layer2')
if "conv1" in name:
UpperCamelCase__ : Optional[Any] = name.replace('conv1' , 'convolution1')
if "conv2" in name:
UpperCamelCase__ : int = name.replace('conv2' , 'convolution2')
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0')
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0')
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0')
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCamelCase__ : int = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0')
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCamelCase__ : Tuple = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection')
if "pretrained.act_postprocess1.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize')
if "pretrained.act_postprocess2.3" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection')
if "pretrained.act_postprocess2.4" in name:
UpperCamelCase__ : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize')
if "pretrained.act_postprocess3.3" in name:
UpperCamelCase__ : Any = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection')
if "pretrained.act_postprocess4.3" in name:
UpperCamelCase__ : List[Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection')
if "pretrained.act_postprocess4.4" in name:
UpperCamelCase__ : Optional[Any] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize')
if "pretrained" in name:
UpperCamelCase__ : List[str] = name.replace('pretrained' , 'dpt')
if "bn" in name:
UpperCamelCase__ : Tuple = name.replace('bn' , 'batch_norm')
if "head" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('head' , 'head.head')
if "encoder.norm" in name:
UpperCamelCase__ : int = name.replace('encoder.norm' , 'layernorm')
if "auxlayer" in name:
UpperCamelCase__ : Union[str, Any] = name.replace('auxlayer' , 'auxiliary_head.head')
return name
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_) -> Any:
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ : Optional[int] = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
UpperCamelCase__ : Any = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ : List[str] = in_proj_weight[: config.hidden_size, :]
UpperCamelCase__ : List[Any] = in_proj_bias[: config.hidden_size]
UpperCamelCase__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ : int = in_proj_bias[-config.hidden_size :]
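# Shape sketch (descriptive note, not from the original script): for hidden
# size H, the fused qkv weight is (3H, H); rows [0:H] become the query
# projection, [H:2H] the key, and [2H:3H] the value, as sliced above.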
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ : List[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_).raw)
return im
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
    UpperCamelCase__, UpperCamelCase__ = get_dpt_config(lowerCamelCase_)
# load original state_dict from URL
UpperCamelCase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location='cpu')
# remove certain keys
remove_ignore_keys_(lowerCamelCase_)
# rename keys
for key in state_dict.copy().keys():
UpperCamelCase__ : str = state_dict.pop(lowerCamelCase_)
UpperCamelCase__ : List[str] = val
# read in qkv matrices
read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_)
# load HuggingFace model
UpperCamelCase__ : str = DPTForSemanticSegmentation(lowerCamelCase_) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowerCamelCase_)
model.load_state_dict(lowerCamelCase_)
model.eval()
# Check outputs on an image
UpperCamelCase__ : Any = 480 if 'ade' in checkpoint_url else 384
UpperCamelCase__ : List[Any] = DPTImageProcessor(size=lowerCamelCase_)
UpperCamelCase__ : int = prepare_img()
UpperCamelCase__ : Optional[Any] = image_processor(lowerCamelCase_ , return_tensors='pt')
# forward pass
UpperCamelCase__ : Any = model(**lowerCamelCase_).logits if 'ade' in checkpoint_url else model(**lowerCamelCase_).predicted_depth
# Assert logits
UpperCamelCase__ : Tuple = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]])
if "ade" in checkpoint_url:
UpperCamelCase__ : List[str] = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]])
assert outputs.shape == torch.Size(lowerCamelCase_)
assert (
torch.allclose(outputs[0, 0, :3, :3] , lowerCamelCase_ , atol=1e-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , lowerCamelCase_)
)
Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_)
print(f'Saving model to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase_)
print(f'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase_)
if push_to_hub:
print('Pushing model to hub...')
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase_ , lowerCamelCase_) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 6 | 0 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowerCAmelCase__ = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = '''sequence-classification'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any):
if type(a_) == dict:
UpperCamelCase__ : Dict = Namespace(**a_)
UpperCamelCase__ : List[Any] = glue_output_modes[hparams.task]
UpperCamelCase__ : Optional[Any] = glue_tasks_num_labels[hparams.task]
super().__init__(a_ , a_ , self.mode)
def __UpperCamelCase ( self : List[Any] , **UpperCAmelCase_ : Optional[Any]):
return self.model(**a_)
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any]):
UpperCamelCase__ : Optional[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCamelCase__ : Optional[int] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
UpperCamelCase__ : Union[str, Any] = self(**a_)
UpperCamelCase__ : List[str] = outputs[0]
UpperCamelCase__ : Tuple = self.trainer.lr_schedulers[0]["""scheduler"""]
UpperCamelCase__ : List[Any] = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Dict = self.hparams
UpperCamelCase__ : List[str] = processors[args.task]()
UpperCamelCase__ : Optional[Any] = processor.get_labels()
for mode in ["train", "dev"]:
UpperCamelCase__ : Union[str, Any] = self._feature_file(a_)
if os.path.exists(a_) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , a_)
else:
logger.info('Creating features from dataset file at %s' , args.data_dir)
UpperCamelCase__ : Any = (
processor.get_dev_examples(args.data_dir)
if mode == """dev"""
else processor.get_train_examples(args.data_dir)
)
UpperCamelCase__ : str = convert_examples_to_features(
a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , a_)
torch.save(a_ , a_)
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : bool = False):
UpperCamelCase__ : Optional[int] = """dev""" if mode == """test""" else mode
UpperCamelCase__ : str = self._feature_file(a_)
logger.info('Loading features from cached file %s' , a_)
UpperCamelCase__ : int = torch.load(a_)
UpperCamelCase__ : Optional[int] = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
UpperCamelCase__ : List[str] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
UpperCamelCase__ : str = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
if self.hparams.glue_output_mode == "classification":
UpperCamelCase__ : Optional[int] = torch.tensor([f.label for f in features] , dtype=torch.long)
elif self.hparams.glue_output_mode == "regression":
UpperCamelCase__ : List[Any] = torch.tensor([f.label for f in features] , dtype=torch.float)
return DataLoader(
TensorDataset(a_ , a_ , a_ , a_) , batch_size=a_ , shuffle=a_ , )
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str]):
UpperCamelCase__ : List[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCamelCase__ : Union[str, Any] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
UpperCamelCase__ : Optional[int] = self(**a_)
UpperCamelCase__ : Union[str, Any] = outputs[:2]
UpperCamelCase__ : List[str] = logits.detach().cpu().numpy()
UpperCamelCase__ : Union[str, Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int):
UpperCamelCase__ : Optional[Any] = torch.stack([x['val_loss'] for x in outputs]).mean().detach().cpu().item()
UpperCamelCase__ : Tuple = np.concatenate([x['pred'] for x in outputs] , axis=0)
if self.hparams.glue_output_mode == "classification":
UpperCamelCase__ : Tuple = np.argmax(a_ , axis=1)
elif self.hparams.glue_output_mode == "regression":
UpperCamelCase__ : Optional[int] = np.squeeze(a_)
UpperCamelCase__ : Optional[Any] = np.concatenate([x['target'] for x in outputs] , axis=0)
UpperCamelCase__ : Optional[Any] = [[] for _ in range(out_label_ids.shape[0])]
UpperCamelCase__ : Tuple = [[] for _ in range(out_label_ids.shape[0])]
UpperCamelCase__ : Tuple = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_)}
UpperCamelCase__ : Optional[int] = dict(results.items())
UpperCamelCase__ : str = results
return ret, preds_list, out_label_list
def __UpperCamelCase ( self : str , UpperCAmelCase_ : list):
UpperCamelCase__ : Tuple = self._eval_end(a_)
UpperCamelCase__ : int = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Any):
UpperCamelCase__ : str = self._eval_end(a_)
UpperCamelCase__ : List[str] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple):
BaseTransformer.add_model_specific_args(a_ , a_)
parser.add_argument(
'--max_seq_length' , default=128 , type=a_ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=a_ , required=a_ , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=a_ , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets')
return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd())
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            './results' , f'{args.task}_{time.strftime("%Y%m%d_%H%M%S")}' , )
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model , args)
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt') , recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
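# Hypothetical invocation (flag names come from add_generic_args/BaseTransformer
# in lightning_base; the paths are illustrative only):
#   python run_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results --do_train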
if __name__ == "__main__":
main()
| 705 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Any=0.6 , UpperCAmelCase_ : Dict=None , ):
UpperCamelCase__ : Tuple = parent
UpperCamelCase__ : List[str] = batch_size
UpperCamelCase__ : Optional[Any] = image_size
UpperCamelCase__ : Optional[Any] = patch_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : Union[str, Any] = is_training
UpperCamelCase__ : int = use_labels
UpperCamelCase__ : Optional[int] = hidden_size
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : str = intermediate_size
UpperCamelCase__ : Union[str, Any] = hidden_act
UpperCamelCase__ : Optional[int] = hidden_dropout_prob
UpperCamelCase__ : Tuple = attention_probs_dropout_prob
UpperCamelCase__ : Any = type_sequence_label_size
UpperCamelCase__ : int = initializer_range
UpperCamelCase__ : Optional[int] = mask_ratio
UpperCamelCase__ : int = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase__ : str = (image_size // patch_size) ** 2
UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
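        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, and
        # ceil((1 - 0.6) * (225 + 1)) = 91 tokens survive masking (CLS included).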
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any]):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Dict = ViTMAEModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Dict = model(UpperCAmelCase_)
UpperCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
        UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = config_and_inputs
UpperCamelCase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = ViTMAEModelTester(self)
UpperCamelCase__ : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def __UpperCamelCase ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def __UpperCamelCase ( self : Tuple):
pass
def __UpperCamelCase ( self : Optional[Any]):
        UpperCamelCase__, UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCamelCase__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def __UpperCamelCase ( self : List[str]):
        UpperCamelCase__, UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(UpperCAmelCase_)
UpperCamelCase__ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Any = [*signature.parameters.keys()]
UpperCamelCase__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_)
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]):
# make masks reproducible
np.random.seed(2)
UpperCamelCase__ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
UpperCamelCase__ : Tuple = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
UpperCamelCase__ : Optional[Any] = torch.from_numpy(UpperCAmelCase_)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase__ : List[str] = pt_noise
super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
        UpperCamelCase__, UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
UpperCamelCase__ : Dict = outputs[0].cpu().numpy()
UpperCamelCase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = model_class.from_pretrained(UpperCAmelCase_)
model.to(UpperCAmelCase_)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
# Make sure we don't have nans
UpperCamelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCamelCase__ : Any = 0
UpperCamelCase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(UpperCAmelCase_ , 1e-5)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Optional[int]):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : Optional[int]):
pass
@slow
def __UpperCamelCase ( self : Optional[Any]):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : int):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
def __UpperCamelCase ( self : str):
# make random mask reproducible across the PT and TF model
np.random.seed(2)
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.default_image_processor
UpperCamelCase__ : Dict = prepare_img()
UpperCamelCase__ : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase__ : Union[str, Any] = ViTMAEConfig()
UpperCamelCase__ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
UpperCamelCase__ : Any = np.random.uniform(size=(1, num_patches))
# forward pass
with torch.no_grad():
UpperCamelCase__ : Dict = model(**UpperCAmelCase_ , noise=torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_))
# verify the logits
UpperCamelCase__ : Tuple = torch.Size((1, 196, 768))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
UpperCamelCase__ : Any = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(UpperCAmelCase_) , atol=1e-4))
| 6 | 0 |
'''simple docstring'''
def euclidean_distance_sqr(pointa , pointb) -> float:
    return (pointa[0] - pointb[0]) ** 2 + (pointa[1] - pointb[1]) ** 2


def column_based_sort(array , column=0) -> list:
    return sorted(array , key=lambda x: x[column])


def dis_between_closest_pair(points , points_counts , min_dis=float('inf')) -> float:
    for i in range(points_counts - 1):
        for j in range(i + 1 , points_counts):
            current_dis = euclidean_distance_sqr(points[i] , points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points , points_counts , min_dis=float('inf')) -> float:
    for i in range(min(6 , points_counts - 1) , points_counts):
        for j in range(max(0 , i - 6) , i):
            current_dis = euclidean_distance_sqr(points[i] , points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x , points_sorted_on_y , points_counts) -> float:
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts)
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[mid:] , points_counts - mid)
    closest_pair_dis = min(closest_in_left , closest_in_right)
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip) , closest_pair_dis)
    return min(closest_pair_dis , closest_in_strip)


def closest_pair_of_points(points , points_counts) -> float:
    points_sorted_on_x = column_based_sort(points , column=0)
    points_sorted_on_y = column_based_sort(points , column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts)
    ) ** 0.5
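# Note: the divide-and-conquer closest_pair_of_points_sqr runs in roughly
# O(n log n), versus the O(n^2) brute force in dis_between_closest_pair.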
if __name__ == "__main__":
lowerCAmelCase__ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points)))
| 706 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=DummyObject ):
    _backends = ['torch', 'scipy']
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int):
requires_backends(self , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
requires_backends(cls , ['torch', 'scipy'])
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
requires_backends(cls , ['torch', 'scipy'])
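# Note: this placeholder mirrors the real object's constructor and classmethods
# and raises a helpful error whenever torch/scipy are not installed.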
| 6 | 0 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowerCAmelCase__ = None
lowerCAmelCase__ = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
lowerCAmelCase__ = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class __lowercase :
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = '''PIL.Image.Image'''
    pa_type: ClassVar[Any] = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
    _type: str = field(default='''Image''' , init=False , repr=False )
def __call__( self : Optional[int]):
return self.pa_type
    def encode_example( self , value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.')
        if isinstance(value , list):
            value = np.array(value)
        if isinstance(value , str):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value , PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
elif value.get('path') is not None and os.path.isfile(value['path']):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path')}
elif value.get('bytes') is not None or value.get('path') is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes'), "path": value.get('path')}
else:
raise ValueError(
F'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.')
    def decode_example( self , value: dict , token_per_repo_id=None):
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.')
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.')
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value['path'], value['bytes']
        if bytes_ is None:
            if path is None:
                raise ValueError(F'An image should have one of \'path\' or \'bytes\' but both are None in {value}.')
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split('::')[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL)['repo_id']
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , 'rb' , use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary'),
"path": Value('string'),
}
)
    def cast_storage( self , storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage) , type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage) , type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('bytes') >= 0:
                bytes_array = storage.field('bytes')
            else:
                bytes_array = pa.array([None] * len(storage) , type=pa.binary())
            if storage.type.get_field_index('path') >= 0:
                path_array = storage.field('path')
            else:
                path_array = pa.array([None] * len(storage) , type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage) , type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null())
        return array_cast(storage , self.pa_type)
    def embed_storage( self , storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path , 'rb') as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('path').to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null())
        return array_cast(storage , self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
    image.save(buffer , format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image , 'filename') and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1')
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.')
        if dtype is not dest_dtype:
            warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'')
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'')
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}')
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
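# Example of the downcast path above (illustrative): a big-endian int64 array
# (">i8") is not a valid image dtype, so the loop halves the itemsize and tries
# ">i4", which is in _VALID_IMAGE_ARRAY_DTPYES and is used (with a warning).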
def objects_to_list_of_image_dicts(objs) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj , str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj , PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
'''simple docstring'''
class RadixNode:
    def __init__( self , prefix: str = "" , is_leaf: bool = False):
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match( self , word: str):
        x = 0
        for q, w in zip(self.prefix , word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
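    # match() returns a 3-tuple: the common leading substring, the unmatched
    # remainder of this node's prefix, and the unmatched remainder of the word.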
    def insert_many( self , words: list[str]):
        for word in words:
            self.insert(word)
    def insert( self , word: str):
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find( self , word: str):
        incoming_node = self.nodes.get(word[0] , None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete( self , word: str):
        incoming_node = self.nodes.get(word[0] , None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree( self , height: int = 0):
        if self.prefix != "":
            print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '')
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = 'banana bananas bandana band apple all beast'.split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find('bandanas')
    assert not root.find('apps')
    root.delete('all')
    assert not root.find('all')
    root.delete('banana')
    assert not root.find('banana')
    assert root.find('bananas')
    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = 'banana bananas bandanas bandana band apple all beast'.split()
    root.insert_many(words)
    print('Words:' , words)
    print('Tree:')
    root.print_tree()
if __name__ == "__main__":
main()
| 6 | 0 |
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_a: int = 1 , input_b: int = 1 , carry_in: int = 1) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_a , str)
        or isinstance(input_b , str)
        or isinstance(carry_in , str)
    ):
        raise TypeError('inputs must be integers.')
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')
    if (
        (math.floor(input_a) != input_a)
        or (math.floor(input_b) != input_b)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')
    # build registers
    quantum_register = qiskit.QuantumRegister(4 , 'qr')
    classical_register = qiskit.ClassicalRegister(2 , 'cr')
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(quantum_register , classical_register)
    for i in range(0 , 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3)  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1)
    quantum_circuit.ccx(1 , 2 , 3)
    quantum_circuit.cx(1 , 2)
    quantum_circuit.cx(0 , 1)
    quantum_circuit.measure([2, 3] , classical_register)  # measure the last two qbits
    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit , backend , shots=1_000)
    return job.result().get_counts(quantum_circuit)
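# Reading the two classical bits: qubit 2 holds the sum and qubit 3 the
# carry-out, so quantum_full_adder(1, 1, 1) concentrates on the state '11'
# (1 + 1 + 1 = 3, i.e. sum = 1 with carry = 1).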
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 708 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase__ = False
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : int):
return 12
@property
def __UpperCamelCase ( self : Tuple):
return 12
@property
def __UpperCamelCase ( self : Dict):
return 32
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self : List[str]):
torch.manual_seed(0)
UpperCamelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[int]):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Dict = 12
UpperCamelCase__ : Union[str, Any] = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_)
return model
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = 'cpu'
UpperCamelCase__ : List[str] = self.dummy_vqvae
UpperCamelCase__ : List[str] = self.dummy_text_encoder
UpperCamelCase__ : Optional[int] = self.dummy_tokenizer
UpperCamelCase__ : List[str] = self.dummy_transformer
UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_)
UpperCamelCase__ : int = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Optional[int] = 'cpu'
UpperCamelCase__ : str = self.dummy_vqvae
UpperCamelCase__ : Any = self.dummy_text_encoder
UpperCamelCase__ : List[Any] = self.dummy_tokenizer
UpperCamelCase__ : Dict = self.dummy_transformer
UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed)
UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
UpperCamelCase__ : str = VQDiffusionPipeline(
vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , )
UpperCamelCase__ : str = pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : int = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = pipe(
[prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0]
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy')
UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_)
pipeline.set_progress_bar_config(disable=UpperCAmelCase_)
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : int = pipeline(
'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , )
UpperCamelCase__ : int = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image).max() < 2.0
| 6 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', sep_token='</s>', **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
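# Usage sketch: the fast tokenizer is normally loaded from the checkpoint listed in the map above.
# tokenizer = HerbertTokenizerFast.from_pretrained('allegro/herbert-base-cased')
# ids = tokenizer('Przykładowe zdanie.')['input_ids']  # <s> ... </s> are added automatically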
| 709 |
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling window
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
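# Worked example (sketch): a 4x4 input with size=2 and stride=2 pools down to 2x2.
# >>> maxpooling(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]), 2, 2)
# array([[ 6.,  8.],
#        [14., 16.]])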
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
    image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 6 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default='image-classification', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'image': Image()})
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel})
    image_column: str = 'image'
    label_column: str = 'labels'
    def align_with_features(self, features: Features) -> 'ImageClassification':
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        # the dataclass is frozen, so the updated schema is written via __dict__
        task_template.__dict__['label_schema'] = label_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: 'image',
            self.label_column: 'labels',
        }
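# Usage sketch (hypothetical label names): aligning the template with a dataset's
# features swaps the generic ClassLabel in `label_schema` for the dataset's concrete one.
# features = Features({'image': Image(), 'labels': ClassLabel(names=['cat', 'dog'])})
# aligned = ImageClassification().align_with_features(features)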
| 710 |
'''simple docstring'''
from __future__ import annotations
class Matrix:
def __init__( self : Union[str, Any] , UpperCAmelCase_ : list[list[int]]):
UpperCamelCase__ : int = TypeError(
'Matrices must be formed from a list of zero or more lists containing at '
'least one and the same number of values, each of which must be of type '
'int or float.')
if len(UpperCAmelCase_) != 0:
UpperCamelCase__ : str = len(rows[0])
if cols == 0:
raise error
for row in rows:
if len(UpperCAmelCase_) != cols:
raise error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise error
UpperCamelCase__ : Optional[int] = rows
else:
UpperCamelCase__ : Optional[Any] = []
def __UpperCamelCase ( self : Union[str, Any]):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
@property
def __UpperCamelCase ( self : Dict):
return len(self.rows)
@property
def __UpperCamelCase ( self : Tuple):
return len(self.rows[0])
@property
def __UpperCamelCase ( self : List[Any]):
return (self.num_rows, self.num_columns)
@property
def __UpperCamelCase ( self : Any):
return self.order[0] == self.order[1]
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[int] = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
for row_num in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0])
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]))
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns))
def __UpperCamelCase ( self : str):
return bool(self.determinant())
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
UpperCamelCase__ : Optional[Any] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns)
if other_column != column
]
for other_row in range(self.num_rows)
if other_row != row
]
return Matrix(UpperCAmelCase_).determinant()
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int):
if (row + column) % 2 == 0:
return self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
return -1 * self.get_minor(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
return Matrix(
[
[self.get_minor(UpperCAmelCase_ , UpperCAmelCase_) for column in range(self.num_columns)]
for row in range(self.num_rows)
])
def __UpperCamelCase ( self : Optional[int]):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns)
]
for row in range(self.minors().num_rows)
])
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = [
[self.cofactors().rows[column][row] for column in range(self.num_columns)]
for row in range(self.num_rows)
]
return Matrix(UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : List[Any] = self.determinant()
if not determinant:
raise TypeError('Only matrices with a non-zero determinant have an inverse')
return self.adjugate() * (1 / determinant)
def __repr__( self : Any):
return str(self.rows)
def __str__( self : List[Any]):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0])) + "]]"
return (
"["
+ "\n ".join(
[
'[' + '. '.join([str(UpperCAmelCase_) for value in row]) + '.]'
for row in self.rows
])
+ "]"
)
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : List[str] = TypeError('Row must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in row:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_columns:
raise ValueError(
'Row must be equal in length to the other rows in the matrix')
if position is None:
self.rows.append(UpperCAmelCase_)
else:
UpperCamelCase__ : Tuple = self.rows[0:position] + [row] + self.rows[position:]
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int | None = None):
UpperCamelCase__ : int = TypeError(
'Column must be a list containing all ints and/or floats')
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise type_error
for value in column:
if not isinstance(UpperCAmelCase_ , (int, float)):
raise type_error
if len(UpperCAmelCase_) != self.num_rows:
raise ValueError(
'Column must be equal in length to the other columns in the matrix')
if position is None:
UpperCamelCase__ : Optional[int] = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
else:
UpperCamelCase__ : str = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows)
]
def __eq__( self : List[Any] , UpperCAmelCase_ : object):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Any , UpperCAmelCase_ : object):
return not self == other
def __neg__( self : Union[str, Any]):
return self * -1
def __add__( self : Optional[int] , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __sub__( self : Tuple , UpperCAmelCase_ : Matrix):
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order')
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __mul__( self : Any , UpperCAmelCase_ : Matrix | int | float):
if isinstance(UpperCAmelCase_ , (int, float)):
return Matrix(
[[int(element * other) for element in row] for row in self.rows])
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
if self.num_columns != other.num_rows:
raise ValueError(
'The number of columns in the first matrix must '
'be equal to the number of rows in the second')
return Matrix(
[
[Matrix.dot_product(UpperCAmelCase_ , UpperCAmelCase_) for column in other.columns()]
for row in self.rows
])
else:
raise TypeError(
'A Matrix can only be multiplied by an int, float, or another matrix')
def __pow__( self : Dict , UpperCAmelCase_ : int):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
raise TypeError('A Matrix can only be raised to the power of an int')
if not self.is_square:
raise ValueError('Only square matrices can be raised to a power')
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'Only invertable matrices can be raised to a negative power')
UpperCamelCase__ : str = self
for _ in range(other - 1):
result *= self
return result
@classmethod
def __UpperCamelCase ( cls : Optional[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int]):
return sum(row[i] * column[i] for i in range(len(UpperCAmelCase_)))
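# Usage sketch, assuming the original method names referenced in the bodies above
# (determinant, is_invertable):
# >>> m = Matrix([[1, 2], [3, 4]])
# >>> m.determinant()
# -2
# >>> m.is_invertable()
# True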
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6 | 0 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))
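# Worked example (sketch): the logistic function maps any real input into (0, 1).
# >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
# array([0.26894142, 0.73105858, 0.88079708])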
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __lowercase :
def __UpperCamelCase ( self : Union[str, Any]):
torch.manual_seed(0)
UpperCamelCase__ : Dict = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : List[str] = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Dict):
torch.manual_seed(0)
UpperCamelCase__ : List[Any] = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
UpperCamelCase__ : str = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
UpperCamelCase__ : List[str] = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0)
UpperCamelCase__ : Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Dict = self.get_dummy_components()
UpperCamelCase__ : List[Any] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = inputs['prompt']
UpperCamelCase__ : List[Any] = inputs['generator']
UpperCamelCase__ : Tuple = inputs['num_inference_steps']
UpperCamelCase__ : List[Any] = inputs['output_type']
if "image" in inputs:
UpperCamelCase__ : Tuple = inputs['image']
else:
UpperCamelCase__ : Union[str, Any] = None
if "mask_image" in inputs:
UpperCamelCase__ : Optional[int] = inputs['mask_image']
else:
UpperCamelCase__ : int = None
if "original_image" in inputs:
UpperCamelCase__ : List[Any] = inputs['original_image']
else:
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__, UpperCamelCase__ : Any = pipe.encode_prompt(UpperCAmelCase_)
# inputs with prompt converted to embeddings
UpperCamelCase__ : List[Any] = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
UpperCamelCase__ : Dict = image
if mask_image is not None:
UpperCamelCase__ : Optional[int] = mask_image
if original_image is not None:
UpperCamelCase__ : Union[str, Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
UpperCamelCase__ : int = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase_ , UpperCAmelCase_) is None , F'`{optional_component}` did not stay set to None after loading.' , )
UpperCamelCase__ : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = inputs['generator']
UpperCamelCase__ : List[Any] = inputs['num_inference_steps']
UpperCamelCase__ : Optional[int] = inputs['output_type']
# inputs with prompt converted to embeddings
UpperCamelCase__ : Any = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
UpperCamelCase__ : Tuple = image
if mask_image is not None:
UpperCamelCase__ : Union[str, Any] = mask_image
if original_image is not None:
UpperCamelCase__ : str = original_image
UpperCamelCase__ : Union[str, Any] = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Dict = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Any = self.get_dummy_components()
UpperCamelCase__ : List[str] = self.pipeline_class(**UpperCAmelCase_)
pipe.to(UpperCAmelCase_)
pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Any = pipe(**UpperCAmelCase_)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = self.pipeline_class.from_pretrained(UpperCAmelCase_)
pipe_loaded.to(UpperCAmelCase_)
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
UpperCamelCase__ : Any = self.get_dummy_inputs(UpperCAmelCase_)
UpperCamelCase__ : Tuple = pipe_loaded(**UpperCAmelCase_)[0]
UpperCamelCase__ : Optional[int] = np.abs(to_np(UpperCAmelCase_) - to_np(UpperCAmelCase_)).max()
self.assertLess(UpperCAmelCase_ , 1e-4)
| 6 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 712 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3  # lower bound used when sampling candidate roots below
def primitive_root(p_val: int) -> int:
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
print('\nWARNING:')
print(
f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'Use a different name or delete these files and re-run this program.')
sys.exit()
    public_key, private_key = generate_key(key_size)
print(f'\nWriting public key to file {name}_pubkey.txt...')
with open(f'{name}_pubkey.txt' , 'w') as fo:
fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
print(f'Writing private key to file {name}_privkey.txt...')
with open(f'{name}_privkey.txt' , 'w') as fo:
fo.write(f'{private_key[0]},{private_key[1]}')
def main() -> None:
print('Making key files...')
make_key_files('elgamal' , 2_048)
print('Key files generation successful')
if __name__ == "__main__":
main()
| 6 | 0 |
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(numsa: list[float], numsb: list[float]) -> float:
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
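# Worked examples (sketch): an odd combined length returns the middle element,
# an even one returns the mean of the two middle elements.
# >>> median_of_two_arrays([1.0, 3.0], [2.0])
# 2.0
# >>> median_of_two_arrays([1.0, 2.0], [3.0, 4.0])
# 2.5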
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_b = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(f'The median of two arrays is: {median_of_two_arrays(array_a, array_b)}')
| 713 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
lowerCAmelCase__ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> str:
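    # For fine-tuned checkpoints, pretraining-only layers are skipped and `ctc_proj` is
    # renamed to `lm_head`; otherwise the dot-separated key is resolved on the HF model
    # via repeated getattr, the tensor shape is checked, and `value` is copied in place.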
for attribute in key.split('.'):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCamelCase__ : str = 'lm_head'
UpperCamelCase__ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_)
if weight_type is not None:
UpperCamelCase__ : List[Any] = getattr(lowerCamelCase_ , lowerCamelCase_).shape
else:
UpperCamelCase__ : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
UpperCamelCase__ : Optional[Any] = value
elif weight_type == "weight_g":
UpperCamelCase__ : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCamelCase__ : List[Any] = value
elif weight_type == "bias":
UpperCamelCase__ : Any = value
else:
UpperCamelCase__ : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : int = fairseq_model.state_dict()
UpperCamelCase__ : int = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase__ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase__ : List[Any] = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
UpperCamelCase__ : Any = True
if "*" in mapped_key:
UpperCamelCase__ : Any = name.split(lowerCamelCase_)[0].split('.')[-2]
UpperCamelCase__ : Union[str, Any] = mapped_key.replace('*' , lowerCamelCase_)
if "weight_g" in name:
UpperCamelCase__ : int = 'weight_g'
elif "weight_v" in name:
UpperCamelCase__ : Any = 'weight_v'
elif "bias" in name:
UpperCamelCase__ : Union[str, Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase__ : Any = 'weight'
else:
UpperCamelCase__ : Tuple = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
continue
if not is_used:
unused_weights.append(lowerCamelCase_)
logger.warning(f'Unused weights: {unused_weights}')
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Tuple:
UpperCamelCase__ : Dict = full_name.split('conv_layers.')[-1]
UpperCamelCase__ : List[Any] = name.split('.')
UpperCamelCase__ : Any = int(items[0])
UpperCamelCase__ : int = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
UpperCamelCase__ : Tuple = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
UpperCamelCase__ : int = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
UpperCamelCase__ : Optional[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
UpperCamelCase__ : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(lowerCamelCase_)
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True) -> Tuple:
if config_path is not None:
UpperCamelCase__ : Optional[Any] = UniSpeechConfig.from_pretrained(lowerCamelCase_)
else:
UpperCamelCase__ : int = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCamelCase__ : Union[str, Any] = Dictionary.load_from_json(lowerCamelCase_)
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase__ : List[Any] = target_dict.pad_index
UpperCamelCase__ : Dict = target_dict.bos_index
UpperCamelCase__ : Union[str, Any] = target_dict.eos_index
UpperCamelCase__ : Tuple = len(target_dict.symbols)
UpperCamelCase__ : Dict = os.path.join(lowerCamelCase_ , 'vocab.json')
if not os.path.isdir(lowerCamelCase_):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_))
return
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_)
UpperCamelCase__ : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase__ : Any = 42
UpperCamelCase__ : List[str] = 43
with open(lowerCamelCase_ , 'w' , encoding='utf-8') as vocab_handle:
json.dump(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : Optional[int] = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , )
UpperCamelCase__ : Optional[Any] = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
UpperCamelCase__ : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_)
processor.save_pretrained(lowerCamelCase_)
UpperCamelCase__ : Dict = UniSpeechForCTC(lowerCamelCase_)
else:
UpperCamelCase__ : List[Any] = UniSpeechForPreTraining(lowerCamelCase_)
if is_finetuned:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
else:
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
UpperCamelCase__ : int = model[0].eval()
recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
hf_unispeech.save_pretrained(lowerCamelCase_)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase__ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 6 | 0 |
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCAmelCase__ = getLogger(__name__)
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 8 , lowerCamelCase_ = 1_024 , lowerCamelCase_="val" , lowerCamelCase_=None , lowerCamelCase_=False , lowerCamelCase_="summarization" , lowerCamelCase_=None , lowerCamelCase_=1 , lowerCamelCase_ = None , lowerCamelCase_="" , **lowerCamelCase_ , ) -> Union[str, Any]:
UpperCamelCase__ : List[Any] = str(SCREAMING_SNAKE_CASE_)
assert local_rank is not None
torch.distributed.init_process_group(backend='nccl' , rank=SCREAMING_SNAKE_CASE_)
UpperCamelCase__ : str = Path(SCREAMING_SNAKE_CASE_)
UpperCamelCase__ : Any = save_dir.joinpath(f'rank_{local_rank}_output.json')
torch.cuda.set_device(SCREAMING_SNAKE_CASE_)
UpperCamelCase__ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_).cuda()
if fpaa:
UpperCamelCase__ : List[Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) # update config with task specific params
UpperCamelCase__ : int = generate_kwargs.pop('num_beams' , model.config.num_beams) # AttributeError risk?
if num_return_sequences > num_beams:
UpperCamelCase__ : int = num_return_sequences
UpperCamelCase__ : List[Any] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_)
logger.info(f'Inferred tokenizer type: {tokenizer.__class__}') # if this is wrong, check config.model_type.
if max_source_length is None:
UpperCamelCase__ : int = tokenizer.model_max_length
if prefix is None:
UpperCamelCase__ : List[Any] = prefix or getattr(model.config , 'prefix' , '') or ''
UpperCamelCase__ : Optional[int] = SeqaSeqDataset(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , max_target_length=1_024 , type_path=SCREAMING_SNAKE_CASE_ , n_obs=SCREAMING_SNAKE_CASE_ , prefix=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
UpperCamelCase__ : List[str] = ds.make_sortish_sampler(SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , add_extra_examples=SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_)
UpperCamelCase__ : List[str] = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , collate_fn=ds.collate_fn)
UpperCamelCase__ : Optional[Any] = []
for batch in tqdm(SCREAMING_SNAKE_CASE_):
UpperCamelCase__ : List[str] = model.generate(
input_ids=batch['input_ids'].to(model.device) , attention_mask=batch['attention_mask'].to(model.device) , num_return_sequences=SCREAMING_SNAKE_CASE_ , num_beams=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
UpperCamelCase__ : Tuple = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_)
UpperCamelCase__ : List[Any] = batch['ids']
if num_return_sequences > 1:
UpperCamelCase__ : str = chunks(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(SCREAMING_SNAKE_CASE_):
results.append({'pred': pred, 'id': ids[i].item()})
save_json(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
return results, sampler.num_replicas
def __UpperCAmelCase ( ) -> Optional[Any]:
UpperCamelCase__ : Tuple = argparse.ArgumentParser(
epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate')
parser.add_argument('--data_dir' , type=SCREAMING_SNAKE_CASE_ , help='like cnn_dm/test.source')
parser.add_argument(
'--model_name' , type=SCREAMING_SNAKE_CASE_ , help='like facebook/bart-large-cnn,t5-base, etc.' , default='sshleifer/distilbart-xsum-12-3' , )
parser.add_argument('--save_dir' , type=SCREAMING_SNAKE_CASE_ , help='where to save' , default='tmp_gen')
parser.add_argument('--max_source_length' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_)
parser.add_argument(
'--type_path' , type=SCREAMING_SNAKE_CASE_ , default='test' , help='which subset to evaluate typically train/val/test')
parser.add_argument('--task' , type=SCREAMING_SNAKE_CASE_ , default='summarization' , help='used for task_specific_params + metrics')
parser.add_argument('--bs' , type=SCREAMING_SNAKE_CASE_ , default=8 , required=SCREAMING_SNAKE_CASE_ , help='batch size')
parser.add_argument(
'--local_rank' , type=SCREAMING_SNAKE_CASE_ , default=-1 , required=SCREAMING_SNAKE_CASE_ , help='should be passed by distributed.launch')
parser.add_argument(
'--n_obs' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='How many observations. Defaults to all.')
parser.add_argument(
'--num_return_sequences' , type=SCREAMING_SNAKE_CASE_ , default=1 , required=SCREAMING_SNAKE_CASE_ , help='How many sequences to return')
parser.add_argument(
'--sync_timeout' , type=SCREAMING_SNAKE_CASE_ , default=600 , required=SCREAMING_SNAKE_CASE_ , help='How long should master process wait for other processes to finish.' , )
parser.add_argument('--src_lang' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_)
parser.add_argument('--tgt_lang' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_)
parser.add_argument(
'--prefix' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='will be added to the begininng of src examples')
parser.add_argument('--fp16' , action='store_true')
parser.add_argument('--debug' , action='store_true')
UpperCamelCase__ : Optional[int] = time.time()
UpperCamelCase__, UpperCamelCase__ : Optional[int] = parser.parse_known_args()
UpperCamelCase__ : Union[str, Any] = parse_numeric_n_bool_cl_kwargs(SCREAMING_SNAKE_CASE_)
if generate_kwargs and args.local_rank <= 0:
print(f'parsed the following generate kwargs: {generate_kwargs}')
UpperCamelCase__ : Tuple = Path(args.save_dir + '_tmp')
Path(SCREAMING_SNAKE_CASE_).mkdir(exist_ok=SCREAMING_SNAKE_CASE_) # this handles locking.
UpperCamelCase__ : Optional[Any] = list(json_save_dir.glob('rank_*.json'))
if intermediate_files:
raise ValueError(f'Found files at {json_save_dir} please move or remove them.')
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
UpperCamelCase__ : Union[str, Any] = {}
if args.src_lang is not None:
UpperCamelCase__ : Optional[int] = args.src_lang
if args.tgt_lang is not None:
UpperCamelCase__ : Union[str, Any] = args.tgt_lang
Path(args.save_dir).mkdir(exist_ok=SCREAMING_SNAKE_CASE_)
UpperCamelCase__, UpperCamelCase__ : Optional[int] = eval_data_dir(
args.data_dir , SCREAMING_SNAKE_CASE_ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
if args.local_rank <= 0:
UpperCamelCase__ : List[str] = Path(args.save_dir)
save_dir.mkdir(exist_ok=SCREAMING_SNAKE_CASE_)
UpperCamelCase__ : str = gather_results_from_each_node(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , args.sync_timeout)
UpperCamelCase__ : int = combine_partial_results(SCREAMING_SNAKE_CASE_)
if args.num_return_sequences > 1:
UpperCamelCase__ : Any = save_dir.joinpath('pseudolabel_results.json')
print(f'Saving aggregated results at {save_path}, intermediate in {json_save_dir}/')
save_json(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
return
UpperCamelCase__ : Optional[int] = Path(args.data_dir).joinpath(args.type_path + '.target')
with open(SCREAMING_SNAKE_CASE_) as f:
UpperCamelCase__ : Any = [x.rstrip() for x in f.readlines()][: len(SCREAMING_SNAKE_CASE_)]
# Calculate metrics, save metrics, and save _generations.txt
UpperCamelCase__ : Tuple = 'translation' in args.task
UpperCamelCase__ : Optional[int] = calculate_bleu if calc_bleu else calculate_rouge
UpperCamelCase__ : List[Any] = 'bleu' if calc_bleu else 'rouge'
UpperCamelCase__ : Optional[int] = score_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
UpperCamelCase__ : List[Any] = len(SCREAMING_SNAKE_CASE_)
UpperCamelCase__ : Optional[Any] = time.time() - start_time
UpperCamelCase__ : List[str] = round(runtime / metrics['n_obs'] , 4)
UpperCamelCase__ : Optional[Any] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
UpperCamelCase__ : Any = save_dir.joinpath(f'{args.type_path}_{metric_name}.json')
save_json(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , indent=SCREAMING_SNAKE_CASE_)
print(SCREAMING_SNAKE_CASE_)
write_txt_file(SCREAMING_SNAKE_CASE_ , save_dir.joinpath(f'{args.type_path}_generations.txt'))
if args.debug:
write_txt_file(SCREAMING_SNAKE_CASE_ , save_dir.joinpath(f'{args.type_path}.target'))
else:
shutil.rmtree(SCREAMING_SNAKE_CASE_)
def __UpperCAmelCase ( lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase__ : Tuple = []
for partial_result in partial_results:
records.extend(SCREAMING_SNAKE_CASE_)
UpperCamelCase__ : Tuple = sorted(SCREAMING_SNAKE_CASE_ , key=lambda lowerCamelCase_: x["id"])
UpperCamelCase__ : Optional[int] = [x['pred'] for x in records]
return preds
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[Any]:
# WAIT FOR lots of .json files
UpperCamelCase__ : Optional[int] = time.time()
logger.info('waiting for all nodes to finish')
UpperCamelCase__ : Tuple = None
while (time.time() - start_wait) < timeout:
UpperCamelCase__ : Tuple = list(save_dir.glob('rank_*.json'))
if len(SCREAMING_SNAKE_CASE_) < num_replicas:
continue
try:
# make sure all json files are fully saved
UpperCamelCase__ : str = lmap(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError('Rank 0 gave up on waiting for other processes')
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 714 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[str]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Union[str, Any] = 1
UpperCamelCase__ : Union[str, Any] = 3
UpperCamelCase__ : Dict = (32, 32)
UpperCamelCase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCAmelCase_)
return image
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCamelCase ( self : Any):
torch.manual_seed(0)
UpperCamelCase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCamelCase ( self : str):
torch.manual_seed(0)
UpperCamelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCAmelCase_)
@property
def __UpperCamelCase ( self : Optional[Any]):
def extract(*UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict):
class __lowercase :
def __init__( self : List[Any]):
UpperCamelCase__ : Optional[Any] = torch.ones([0])
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : int):
self.pixel_values.to(UpperCAmelCase_)
return self
return Out()
return extract
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Any = self.dummy_cond_unet
UpperCamelCase__ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
UpperCamelCase__ : List[str] = self.dummy_vae
UpperCamelCase__ : str = self.dummy_text_encoder
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : List[Any] = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Tuple = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : int = self.dummy_cond_unet
UpperCamelCase__ : Dict = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = self.dummy_vae
UpperCamelCase__ : Optional[int] = self.dummy_text_encoder
UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Dict = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Tuple = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[str] = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : str = sd_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np')
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
UpperCamelCase__ : Optional[Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=UpperCAmelCase_ , )[0]
UpperCamelCase__ : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=UpperCAmelCase_)
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
assert isinstance(pipe.scheduler , UpperCAmelCase_)
assert pipe.safety_checker is None
UpperCamelCase__ : List[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase__ : Optional[Any] = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Dict = self.dummy_cond_unet
UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_)
UpperCamelCase__ : Any = self.dummy_vae
UpperCamelCase__ : Optional[Any] = self.dummy_text_encoder
UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
# put models in fp16
UpperCamelCase__ : Any = unet.half()
UpperCamelCase__ : Tuple = vae.half()
UpperCamelCase__ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : Any = 'A painting of a squirrel eating a burger'
UpperCamelCase__ : int = sd_pipe([prompt] , num_inference_steps=2 , output_type='np').images
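        # fp16 numerics vary across GPUs, so this smoke test checks only the output shape.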
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowercase (unittest.TestCase ):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
UpperCamelCase__ : Any = 4_003_660_346
UpperCamelCase__ : Any = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : str = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
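        # sld_* knobs (safe latent diffusion): sld_warmup_steps delays the safety
        # guidance for the first denoising steps; the momentum terms smooth it over time.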
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : str = image[0, -3:, -3:, -1]
UpperCamelCase__ : Tuple = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=UpperCAmelCase_)
UpperCamelCase__ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
UpperCamelCase__ : Dict = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : str = 'padme amidala taking a bath artwork, safe for work, no nudity'
UpperCamelCase__ : Tuple = 2_734_971_755
UpperCamelCase__ : Tuple = 7
UpperCamelCase__ : Tuple = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : int = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : int = output.images
UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
UpperCamelCase__ : List[str] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Union[str, Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
UpperCamelCase__ : Optional[Any] = sd_pipe.to(UpperCAmelCase_)
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_)
UpperCamelCase__ : int = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
UpperCamelCase__ : Any = 1_044_355_234
UpperCamelCase__ : Optional[int] = 12
UpperCamelCase__ : Optional[int] = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : str = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : Any = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
UpperCamelCase__ : int = torch.manual_seed(UpperCAmelCase_)
UpperCamelCase__ : List[str] = sd_pipe(
[prompt] , generator=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase__ : Optional[Any] = output.images
UpperCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61])
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
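# Hedged usage sketch (not part of the tests above; model id and prompt are illustrative):
#   from diffusers import StableDiffusionPipeline
#   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   image = pipe("a photo of an astronaut riding a horse", num_inference_steps=25).images[0]
#   image.save("astronaut.png")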
| 6 | 0 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            # DistilBERT/BART take no token_type_ids; BERT/XLNet/ALBERT read segment ids from batch[2].
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            # STS-B is GLUE's regression task; every other task is classification.
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


def main() -> None:
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
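# Hedged CLI sketch (script name and paths are illustrative):
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results/mrpc --do_predict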
| 715 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __UpperCAmelCase ( ) -> Union[str, Any]:
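    # Build GPT-2's reversible byte<->unicode map: printable bytes map to themselves
    # and the remaining bytes are shifted to unused code points, so no BPE token
    # ever contains a raw control character.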
UpperCamelCase__ : Optional[Any] = (
list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1))
)
UpperCamelCase__ : List[Any] = bs[:]
UpperCamelCase__ : Optional[int] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase_)
cs.append(2**8 + n)
n += 1
UpperCamelCase__ : Union[str, Any] = [chr(lowerCamelCase_) for n in cs]
return dict(zip(lowerCamelCase_ , lowerCamelCase_))
def __UpperCAmelCase ( lowerCamelCase_) -> Tuple:
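    # Return the set of adjacent symbol pairs in a word (each symbol is a
    # variable-length string produced by earlier merges).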
UpperCamelCase__ : Any = set()
UpperCamelCase__ : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
UpperCamelCase__ : str = char
return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict="replace" , UpperCAmelCase_ : int="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : List[Any] , ):
UpperCamelCase__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
UpperCamelCase__ : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
UpperCamelCase__ : int = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
UpperCamelCase__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token
UpperCamelCase__ : Optional[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
UpperCamelCase__ : Any = json.load(UpperCAmelCase_)
UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ : Any = errors # how to handle errors in decoding
UpperCamelCase__ : Tuple = bytes_to_unicode()
UpperCamelCase__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle:
UpperCamelCase__ : List[Any] = merges_handle.read().split('\n')[1:-1]
UpperCamelCase__ : List[Any] = [tuple(merge.split()) for merge in bpe_merges]
UpperCamelCase__ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Dict = {}
UpperCamelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase__ : Any = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple):
return len(self.encoder)
def __UpperCamelCase ( self : Tuple):
return dict(self.encoder , **self.added_tokens_encoder)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any]):
if token in self.cache:
return self.cache[token]
UpperCamelCase__ : Optional[int] = tuple(UpperCAmelCase_)
UpperCamelCase__ : int = get_pairs(UpperCAmelCase_)
if not pairs:
return token
while True:
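            # Repeatedly apply the lowest-rank (earliest-learned) merge until no
            # known bigram remains in the word.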
UpperCamelCase__ : Tuple = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCAmelCase_ , float('inf')))
if bigram not in self.bpe_ranks:
break
UpperCamelCase__, UpperCamelCase__ : Tuple = bigram
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Optional[int] = 0
while i < len(UpperCAmelCase_):
try:
UpperCamelCase__ : Tuple = word.index(UpperCAmelCase_ , UpperCAmelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCamelCase__ : Any = j
if word[i] == first and i < len(UpperCAmelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCamelCase__ : List[str] = tuple(UpperCAmelCase_)
UpperCamelCase__ : Dict = new_word
if len(UpperCAmelCase_) == 1:
break
else:
UpperCamelCase__ : Optional[int] = get_pairs(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = word
return word
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any):
UpperCamelCase__ : Optional[Any] = []
for token in re.findall(self.pat , UpperCAmelCase_):
UpperCamelCase__ : Optional[int] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' '))
return bpe_tokens
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token))
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int]):
return self.decoder.get(UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : int):
UpperCamelCase__ : int = ''.join(UpperCAmelCase_)
UpperCamelCase__ : Any = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def __UpperCamelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
if not os.path.isdir(UpperCAmelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase__ : str = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n')
UpperCamelCase__ : str = 0
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!')
UpperCamelCase__ : List[Any] = token_index
writer.write(' '.join(UpperCAmelCase_) + '\n')
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_)) + [1]
return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1]
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
UpperCamelCase__ : Any = [self.sep_token_id]
UpperCamelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : Optional[Any]):
UpperCamelCase__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_) > 0 and not text[0].isspace()):
UpperCamelCase__ : str = ' ' + text
return (text, kwargs)
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"):
UpperCamelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text)
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : int = self.encode(UpperCAmelCase_)
if len(UpperCAmelCase_) > self.model_max_length:
UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
return input_ids
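# Hedged usage sketch (assumes the published checkpoint):
#   tok = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   ids = tok("Hello world")["input_ids"]  # build_inputs_with_special_tokens appends EOS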
| 6 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
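# These classes implement constrained decoding: a Constraint reports which token(s)
# would advance it, and beam search feeds accepted tokens back through update().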
class Constraint(ABC):
    r"""Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10_000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class PhrasalConstraint(Constraint):
    r"""[`Constraint`] enforcing that an ordered sequence of tokens appears in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint


class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        r"""A helper class that builds a trie with the token sequences in `nested_token_ids`."""
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """The next possible tokens that will progress the trie, given `current_seq`."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """Returns whether # of leaves == # of sequences; otherwise one sequence is a subset of another."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count


class DisjunctiveConstraint(Constraint):
    r"""A special [`Constraint`] that is fulfilled by fulfilling just one of several token sequences."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint


class ConstraintListState:
    r"""A class for beam scorers to track their progress through a list of constraints."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never actually mutate the
        # self.constraints objects in this process, so they are still in their
        # initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
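# Hedged usage sketch for constrained generation (token ids are illustrative):
#   phrase = PhrasalConstraint(tokenizer("rainy day", add_special_tokens=False).input_ids)
#   flexible = DisjunctiveConstraint([[1322], [318, 1322]])  # e.g. "rain" vs " rain"
#   model.generate(input_ids, constraints=[phrase, flexible], num_beams=4)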
| 716 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # Yahoo's markup changes often; this CSS class is a snapshot and may need updating.
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 6 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        # CTRL's BPE uses '@@' as a continuation marker on non-final subword pieces.
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 717 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # RVL-CDIP has 16 document classes, hence the (1, 16) logits.
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 6 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    # Breadth-first traversal; deque gives O(1) popleft.
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    # Alternate left-to-right and right-to-left on successive levels.
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")
    print(f"Height of Tree: {height(tree)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(tree))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 718 |
'''simple docstring'''
import argparse
import struct
import unittest
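# SHA-256 pads the message to a multiple of 64 bytes, then processes each 512-bit
# block: the 16 message words are expanded to 64 and fed through 64 rounds of the
# compression function over eight 32-bit working registers.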
class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values (first 32 bits of the fractional parts of the
        # square roots of the first 8 primes)
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]
        # Initialize round constants (first 32 bits of the fractional parts of the
        # cube roots of the first 64 primes)
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # Append a single 1-bit (0x80), zero padding, then the 64-bit big-endian bit length.
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4-byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 zero-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        # Right-rotate a 32-bit unsigned value by the given number of bits.
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
| 6 | 0 |