#!/usr/bin/env python
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import builtins
import difflib
import inspect
import logging
import math
import re
from collections.abc import Mapping
from importlib import import_module
from types import ModuleType
from typing import Any, Callable, Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
from .utils import BASE_BUILTIN_MODULES, truncate_content
logger = logging.getLogger(__name__)
class InterpreterError(ValueError):
"""
An error raised when the interpreter cannot evaluate a Python expression, due to a syntax error or an unsupported
operation.
"""
pass
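# Builtin exception classes, exposed by name so that interpreted code can reference, raise, and catch them.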
ERRORS = {
name: getattr(builtins, name)
for name in dir(builtins)
if isinstance(getattr(builtins, name), type) and issubclass(getattr(builtins, name), BaseException)
}
DEFAULT_MAX_LEN_OUTPUT = 50000
MAX_OPERATIONS = 10000000
MAX_WHILE_ITERATIONS = 1000000
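# Stub standing in for `print`: actual print output is captured in the state under `_print_outputs` (see `evaluate_call`).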
def custom_print(*args):
return None
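# Safe subset of builtins and math helpers exposed to interpreted code by default.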
BASE_PYTHON_TOOLS = {
"print": custom_print,
"isinstance": isinstance,
"range": range,
"float": float,
"int": int,
"bool": bool,
"str": str,
"set": set,
"list": list,
"dict": dict,
"tuple": tuple,
"round": round,
"ceil": math.ceil,
"floor": math.floor,
"log": math.log,
"exp": math.exp,
"sin": math.sin,
"cos": math.cos,
"tan": math.tan,
"asin": math.asin,
"acos": math.acos,
"atan": math.atan,
"atan2": math.atan2,
"degrees": math.degrees,
"radians": math.radians,
"pow": math.pow,
"sqrt": math.sqrt,
"len": len,
"sum": sum,
"max": max,
"min": min,
"abs": abs,
"enumerate": enumerate,
"zip": zip,
"reversed": reversed,
"sorted": sorted,
"all": all,
"any": any,
"map": map,
"filter": filter,
"ord": ord,
"chr": chr,
"next": next,
"iter": iter,
"divmod": divmod,
"callable": callable,
"getattr": getattr,
"hasattr": hasattr,
"setattr": setattr,
"issubclass": issubclass,
"type": type,
"complex": complex,
}
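# Accumulates everything printed by interpreted code; stored in the state under `_print_outputs`.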
class PrintContainer:
def __init__(self):
self.value = ""
def append(self, text):
self.value += text
return self
def __iadd__(self, other):
"""Implements the += operator"""
self.value += str(other)
return self
def __str__(self):
"""String representation"""
return self.value
def __repr__(self):
"""Representation for debugging"""
return f"PrintContainer({self.value})"
def __len__(self):
"""Implements len() function support"""
return len(self.value)
class BreakException(Exception):
pass
class ContinueException(Exception):
pass
class ReturnException(Exception):
def __init__(self, value):
self.value = value
def get_iterable(obj):
if isinstance(obj, list):
return obj
elif hasattr(obj, "__iter__"):
return list(obj)
else:
raise InterpreterError("Object is not iterable")
def fix_final_answer_code(code: str) -> str:
"""
Sometimes an LLM will try to assign a value to a variable named `final_answer`, which would shadow and break the `final_answer()` tool.
This function fixes that behaviour by renaming such variable assignments (and later uses of the variable) to `final_answer_variable`,
while preserving actual function calls to `final_answer()`.
"""
# First, find if there's a direct assignment to final_answer
# Use word boundary and negative lookbehind to ensure it's not an object attribute
assignment_pattern = r"(?<!\.)(?<!\w)\bfinal_answer\s*="
if "final_answer(" not in code or not re.search(assignment_pattern, code):
# If the final_answer tool is not called in this blob, then doing the replacement is hazardous: it could confuse the model's memory for the next steps.
# Leave the code unmodified and let the subsequent assignment error happen.
return code
# Pattern for replacing variable assignments
# Looks for 'final_answer' followed by '=' with optional whitespace
# Negative lookbehind ensures we don't match object attributes
assignment_regex = r"(?<!\.)(?<!\w)(\bfinal_answer)(\s*=)"
code = re.sub(assignment_regex, r"final_answer_variable\2", code)
# Pattern for replacing variable usage but not function calls
# Negative lookahead (?!\s*\() ensures we don't match function calls
# Negative lookbehind (?<!\.|\w) ensures we don't match object methods or other variables
variable_regex = r"(?<!\.)(?<!\w)(\bfinal_answer\b)(?!\s*\()"
code = re.sub(variable_regex, "final_answer_variable", code)
return code
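# Illustrative sketch (not part of the original module) of what the rewrite does:
# Input:  "final_answer = compute()\nfinal_answer(final_answer)"
# Output: "final_answer_variable = compute()\nfinal_answer(final_answer_variable)"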
def evaluate_unaryop(
expression: ast.UnaryOp,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> Any:
operand = evaluate_ast(expression.operand, state, static_tools, custom_tools, authorized_imports)
if isinstance(expression.op, ast.USub):
return -operand
elif isinstance(expression.op, ast.UAdd):
return operand
elif isinstance(expression.op, ast.Not):
return not operand
elif isinstance(expression.op, ast.Invert):
return ~operand
else:
raise InterpreterError(f"Unary operation {expression.op.__class__.__name__} is not supported.")
def evaluate_lambda(
lambda_expression: ast.Lambda,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> Callable:
args = [arg.arg for arg in lambda_expression.args.args]
def lambda_func(*values: Any) -> Any:
new_state = state.copy()
for arg, value in zip(args, values):
new_state[arg] = value
return evaluate_ast(
lambda_expression.body,
new_state,
static_tools,
custom_tools,
authorized_imports,
)
return lambda_func
def evaluate_while(
while_loop: ast.While,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> None:
iterations = 0
while evaluate_ast(while_loop.test, state, static_tools, custom_tools, authorized_imports):
for node in while_loop.body:
try:
evaluate_ast(node, state, static_tools, custom_tools, authorized_imports)
except BreakException:
return None
except ContinueException:
break
iterations += 1
if iterations > MAX_WHILE_ITERATIONS:
raise InterpreterError(f"Maximum number of {MAX_WHILE_ITERATIONS} iterations in While loop exceeded")
return None
def create_function(
func_def: ast.FunctionDef,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> Callable:
def new_func(*args: Any, **kwargs: Any) -> Any:
func_state = state.copy()
arg_names = [arg.arg for arg in func_def.args.args]
default_values = [
evaluate_ast(d, state, static_tools, custom_tools, authorized_imports) for d in func_def.args.defaults
]
# Apply default values
defaults = dict(zip(arg_names[-len(default_values) :], default_values))
# Set positional arguments
for name, value in zip(arg_names, args):
func_state[name] = value
# Set keyword arguments
for name, value in kwargs.items():
func_state[name] = value
# Handle variable arguments
if func_def.args.vararg:
vararg_name = func_def.args.vararg.arg
func_state[vararg_name] = args
if func_def.args.kwarg:
kwarg_name = func_def.args.kwarg.arg
func_state[kwarg_name] = kwargs
# Set default values for arguments that were not provided
for name, value in defaults.items():
if name not in func_state:
func_state[name] = value
# Update function state with self and __class__
if func_def.args.args and func_def.args.args[0].arg == "self":
if args:
func_state["self"] = args[0]
func_state["__class__"] = args[0].__class__
result = None
try:
for stmt in func_def.body:
result = evaluate_ast(stmt, func_state, static_tools, custom_tools, authorized_imports)
except ReturnException as e:
result = e.value
if func_def.name == "__init__":
return None
return result
return new_func
def evaluate_function_def(
func_def: ast.FunctionDef,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> Callable:
custom_tools[func_def.name] = create_function(func_def, state, static_tools, custom_tools, authorized_imports)
return custom_tools[func_def.name]
def evaluate_class_def(
class_def: ast.ClassDef,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> type:
class_name = class_def.name
bases = [evaluate_ast(base, state, static_tools, custom_tools, authorized_imports) for base in class_def.bases]
class_dict = {}
for stmt in class_def.body:
if isinstance(stmt, ast.FunctionDef):
class_dict[stmt.name] = evaluate_function_def(stmt, state, static_tools, custom_tools, authorized_imports)
elif isinstance(stmt, ast.Assign):
for target in stmt.targets:
if isinstance(target, ast.Name):
class_dict[target.id] = evaluate_ast(
stmt.value,
state,
static_tools,
custom_tools,
authorized_imports,
)
elif isinstance(target, ast.Attribute):
class_dict[target.attr] = evaluate_ast(
stmt.value,
state,
static_tools,
custom_tools,
authorized_imports,
)
else:
raise InterpreterError(f"Unsupported statement in class body: {stmt.__class__.__name__}")
new_class = type(class_name, tuple(bases), class_dict)
state[class_name] = new_class
return new_class
def evaluate_augassign(
expression: ast.AugAssign,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> Any:
def get_current_value(target: ast.AST) -> Any:
if isinstance(target, ast.Name):
return state.get(target.id, 0)
elif isinstance(target, ast.Subscript):
obj = evaluate_ast(target.value, state, static_tools, custom_tools, authorized_imports)
key = evaluate_ast(target.slice, state, static_tools, custom_tools, authorized_imports)
return obj[key]
elif isinstance(target, ast.Attribute):
obj = evaluate_ast(target.value, state, static_tools, custom_tools, authorized_imports)
return getattr(obj, target.attr)
elif isinstance(target, ast.Tuple):
return tuple(get_current_value(elt) for elt in target.elts)
elif isinstance(target, ast.List):
return [get_current_value(elt) for elt in target.elts]
else:
raise InterpreterError("AugAssign not supported for {type(target)} targets.")
current_value = get_current_value(expression.target)
value_to_add = evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports)
if isinstance(expression.op, ast.Add):
if isinstance(current_value, list):
if not isinstance(value_to_add, list):
raise InterpreterError(f"Cannot add non-list value {value_to_add} to a list.")
current_value += value_to_add
else:
current_value += value_to_add
elif isinstance(expression.op, ast.Sub):
current_value -= value_to_add
elif isinstance(expression.op, ast.Mult):
current_value *= value_to_add
elif isinstance(expression.op, ast.Div):
current_value /= value_to_add
elif isinstance(expression.op, ast.Mod):
current_value %= value_to_add
elif isinstance(expression.op, ast.Pow):
current_value **= value_to_add
elif isinstance(expression.op, ast.FloorDiv):
current_value //= value_to_add
elif isinstance(expression.op, ast.BitAnd):
current_value &= value_to_add
elif isinstance(expression.op, ast.BitOr):
current_value |= value_to_add
elif isinstance(expression.op, ast.BitXor):
current_value ^= value_to_add
elif isinstance(expression.op, ast.LShift):
current_value <<= value_to_add
elif isinstance(expression.op, ast.RShift):
current_value >>= value_to_add
else:
raise InterpreterError(f"Operation {type(expression.op).__name__} is not supported.")
# Update the state: current_value has been updated in-place
set_value(
expression.target,
current_value,
state,
static_tools,
custom_tools,
authorized_imports,
)
return current_value
def evaluate_boolop(
node: ast.BoolOp,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> bool:
if isinstance(node.op, ast.And):
for value in node.values:
if not evaluate_ast(value, state, static_tools, custom_tools, authorized_imports):
return False
return True
elif isinstance(node.op, ast.Or):
for value in node.values:
if evaluate_ast(value, state, static_tools, custom_tools, authorized_imports):
return True
return False
def evaluate_binop(
binop: ast.BinOp,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> Any:
# Recursively evaluate the left and right operands
left_val = evaluate_ast(binop.left, state, static_tools, custom_tools, authorized_imports)
right_val = evaluate_ast(binop.right, state, static_tools, custom_tools, authorized_imports)
# Determine the operation based on the type of the operator in the BinOp
if isinstance(binop.op, ast.Add):
return left_val + right_val
elif isinstance(binop.op, ast.Sub):
return left_val - right_val
elif isinstance(binop.op, ast.Mult):
return left_val * right_val
elif isinstance(binop.op, ast.Div):
return left_val / right_val
elif isinstance(binop.op, ast.Mod):
return left_val % right_val
elif isinstance(binop.op, ast.Pow):
return left_val**right_val
elif isinstance(binop.op, ast.FloorDiv):
return left_val // right_val
elif isinstance(binop.op, ast.BitAnd):
return left_val & right_val
elif isinstance(binop.op, ast.BitOr):
return left_val | right_val
elif isinstance(binop.op, ast.BitXor):
return left_val ^ right_val
elif isinstance(binop.op, ast.LShift):
return left_val << right_val
elif isinstance(binop.op, ast.RShift):
return left_val >> right_val
else:
raise NotImplementedError(f"Binary operation {type(binop.op).__name__} is not implemented.")
def evaluate_assign(
assign: ast.Assign,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> Any:
result = evaluate_ast(assign.value, state, static_tools, custom_tools, authorized_imports)
if len(assign.targets) == 1:
target = assign.targets[0]
set_value(target, result, state, static_tools, custom_tools, authorized_imports)
else:
if len(assign.targets) != len(result):
raise InterpreterError(f"Assign failed: expected {len(result)} values but got {len(assign.targets)}.")
expanded_values = []
for tgt in assign.targets:
if isinstance(tgt, ast.Starred):
expanded_values.extend(result)
else:
expanded_values.append(result)
for tgt, val in zip(assign.targets, expanded_values):
set_value(tgt, val, state, static_tools, custom_tools, authorized_imports)
return result
def set_value(
target: ast.AST,
value: Any,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> None:
if isinstance(target, ast.Name):
if target.id in static_tools:
raise InterpreterError(f"Cannot assign to name '{target.id}': doing this would erase the existing tool!")
state[target.id] = value
elif isinstance(target, ast.Tuple):
if not isinstance(value, tuple):
if hasattr(value, "__iter__") and not isinstance(value, (str, bytes)):
value = tuple(value)
else:
raise InterpreterError("Cannot unpack non-tuple value")
if len(target.elts) != len(value):
raise InterpreterError("Cannot unpack tuple of wrong size")
for i, elem in enumerate(target.elts):
set_value(elem, value[i], state, static_tools, custom_tools, authorized_imports)
elif isinstance(target, ast.Subscript):
obj = evaluate_ast(target.value, state, static_tools, custom_tools, authorized_imports)
key = evaluate_ast(target.slice, state, static_tools, custom_tools, authorized_imports)
obj[key] = value
elif isinstance(target, ast.Attribute):
obj = evaluate_ast(target.value, state, static_tools, custom_tools, authorized_imports)
setattr(obj, target.attr, value)
def evaluate_call(
call: ast.Call,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> Any:
if not (
isinstance(call.func, ast.Attribute) or isinstance(call.func, ast.Name) or isinstance(call.func, ast.Subscript)
):
raise InterpreterError(f"This is not a correct function: {call.func}).")
if isinstance(call.func, ast.Attribute):
obj = evaluate_ast(call.func.value, state, static_tools, custom_tools, authorized_imports)
func_name = call.func.attr
if not hasattr(obj, func_name):
raise InterpreterError(f"Object {obj} has no attribute {func_name}")
func = getattr(obj, func_name)
elif isinstance(call.func, ast.Name):
func_name = call.func.id
if func_name in state:
func = state[func_name]
elif func_name in static_tools:
func = static_tools[func_name]
elif func_name in custom_tools:
func = custom_tools[func_name]
elif func_name in ERRORS:
func = ERRORS[func_name]
else:
raise InterpreterError(
f"It is not permitted to evaluate other functions than the provided tools or functions defined/imported in previous code (tried to execute {call.func.id})."
)
elif isinstance(call.func, ast.Subscript):
value = evaluate_ast(call.func.value, state, static_tools, custom_tools, authorized_imports)
index = evaluate_ast(call.func.slice, state, static_tools, custom_tools, authorized_imports)
if isinstance(value, (list, tuple)):
func = value[index]
else:
raise InterpreterError(f"Cannot subscript object of type {type(value).__name__}")
if not callable(func):
raise InterpreterError(f"This is not a correct function: {call.func}).")
func_name = None
args = []
for arg in call.args:
if isinstance(arg, ast.Starred):
args.extend(evaluate_ast(arg.value, state, static_tools, custom_tools, authorized_imports))
else:
args.append(evaluate_ast(arg, state, static_tools, custom_tools, authorized_imports))
kwargs = {
keyword.arg: evaluate_ast(keyword.value, state, static_tools, custom_tools, authorized_imports)
for keyword in call.keywords
}
if func_name == "super":
if not args:
if "__class__" in state and "self" in state:
return super(state["__class__"], state["self"])
else:
raise InterpreterError("super() needs at least one argument")
cls = args[0]
if not isinstance(cls, type):
raise InterpreterError("super() argument 1 must be type")
if len(args) == 1:
return super(cls)
elif len(args) == 2:
instance = args[1]
return super(cls, instance)
else:
raise InterpreterError("super() takes at most 2 arguments")
else:
if func_name == "print":
state["_print_outputs"] += " ".join(map(str, args)) + "\n"
return None
else: # Assume it's a callable object
if (
(inspect.getmodule(func) == builtins)
and inspect.isbuiltin(func)
and (func not in static_tools.values())
):
raise InterpreterError(
f"Invoking a builtin function that has not been explicitly added as a tool is not allowed ({func_name})."
)
return func(*args, **kwargs)
def evaluate_subscript(
subscript: ast.Subscript,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> Any:
index = evaluate_ast(subscript.slice, state, static_tools, custom_tools, authorized_imports)
value = evaluate_ast(subscript.value, state, static_tools, custom_tools, authorized_imports)
if isinstance(value, str) and isinstance(index, str):
raise InterpreterError("You're trying to subscript a string with a string index, which is impossible")
if isinstance(value, pd.core.indexing._LocIndexer):
parent_object = value.obj
return parent_object.loc[index]
if isinstance(value, pd.core.indexing._iLocIndexer):
parent_object = value.obj
return parent_object.iloc[index]
if isinstance(value, (pd.DataFrame, pd.Series, np.ndarray)):
return value[index]
elif isinstance(value, pd.core.groupby.generic.DataFrameGroupBy):
return value[index]
elif isinstance(index, slice):
return value[index]
elif isinstance(value, (list, tuple)):
if not (-len(value) <= index < len(value)):
raise InterpreterError(f"Index {index} out of bounds for list of length {len(value)}")
return value[int(index)]
elif isinstance(value, str):
if not (-len(value) <= index < len(value)):
raise InterpreterError(f"Index {index} out of bounds for string of length {len(value)}")
return value[index]
elif index in value:
return value[index]
else:
error_message = f"Could not index {value} with '{index}'."
if isinstance(index, str) and isinstance(value, Mapping):
close_matches = difflib.get_close_matches(index, list(value.keys()))
if len(close_matches) > 0:
error_message += f" Maybe you meant one of these indexes instead: {str(close_matches)}"
raise InterpreterError(error_message)
def evaluate_name(
name: ast.Name,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> Any:
if name.id in state:
return state[name.id]
elif name.id in static_tools:
return static_tools[name.id]
elif name.id in custom_tools:
return custom_tools[name.id]
elif name.id in ERRORS:
return ERRORS[name.id]
close_matches = difflib.get_close_matches(name.id, list(state.keys()))
if len(close_matches) > 0:
return state[close_matches[0]]
raise InterpreterError(f"The variable `{name.id}` is not defined.")
def evaluate_condition(
condition: ast.Compare,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> bool:
left = evaluate_ast(condition.left, state, static_tools, custom_tools, authorized_imports)
comparators = [
evaluate_ast(c, state, static_tools, custom_tools, authorized_imports) for c in condition.comparators
]
ops = [type(op) for op in condition.ops]
result = True
current_left = left
for op, comparator in zip(ops, comparators):
if op == ast.Eq:
current_result = current_left == comparator
elif op == ast.NotEq:
current_result = current_left != comparator
elif op == ast.Lt:
current_result = current_left < comparator
elif op == ast.LtE:
current_result = current_left <= comparator
elif op == ast.Gt:
current_result = current_left > comparator
elif op == ast.GtE:
current_result = current_left >= comparator
elif op == ast.Is:
current_result = current_left is comparator
elif op == ast.IsNot:
current_result = current_left is not comparator
elif op == ast.In:
current_result = current_left in comparator
elif op == ast.NotIn:
current_result = current_left not in comparator
else:
raise InterpreterError(f"Operator not supported: {op}")
result = result & current_result
current_left = comparator
if isinstance(result, bool) and not result:
break
return result if isinstance(result, (bool, pd.Series)) else result.all()
def evaluate_if(
if_statement: ast.If,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> Any:
result = None
test_result = evaluate_ast(if_statement.test, state, static_tools, custom_tools, authorized_imports)
if test_result:
for line in if_statement.body:
line_result = evaluate_ast(line, state, static_tools, custom_tools, authorized_imports)
if line_result is not None:
result = line_result
else:
for line in if_statement.orelse:
line_result = evaluate_ast(line, state, static_tools, custom_tools, authorized_imports)
if line_result is not None:
result = line_result
return result
def evaluate_for(
for_loop: ast.For,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> Any:
result = None
iterator = evaluate_ast(for_loop.iter, state, static_tools, custom_tools, authorized_imports)
for counter in iterator:
set_value(
for_loop.target,
counter,
state,
static_tools,
custom_tools,
authorized_imports,
)
for node in for_loop.body:
try:
line_result = evaluate_ast(node, state, static_tools, custom_tools, authorized_imports)
if line_result is not None:
result = line_result
except BreakException:
break
except ContinueException:
continue
else:
continue
break
return result
def evaluate_listcomp(
listcomp: ast.ListComp,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> List[Any]:
def inner_evaluate(generators: List[ast.comprehension], index: int, current_state: Dict[str, Any]) -> List[Any]:
if index >= len(generators):
return [
evaluate_ast(
listcomp.elt,
current_state,
static_tools,
custom_tools,
authorized_imports,
)
]
generator = generators[index]
iter_value = evaluate_ast(
generator.iter,
current_state,
static_tools,
custom_tools,
authorized_imports,
)
result = []
for value in iter_value:
new_state = current_state.copy()
if isinstance(generator.target, ast.Tuple):
for idx, elem in enumerate(generator.target.elts):
new_state[elem.id] = value[idx]
else:
new_state[generator.target.id] = value
if all(
evaluate_ast(if_clause, new_state, static_tools, custom_tools, authorized_imports)
for if_clause in generator.ifs
):
result.extend(inner_evaluate(generators, index + 1, new_state))
return result
return inner_evaluate(listcomp.generators, 0, state)
def evaluate_try(
try_node: ast.Try,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> None:
try:
for stmt in try_node.body:
evaluate_ast(stmt, state, static_tools, custom_tools, authorized_imports)
except Exception as e:
matched = False
for handler in try_node.handlers:
if handler.type is None or isinstance(
e,
evaluate_ast(handler.type, state, static_tools, custom_tools, authorized_imports),
):
matched = True
if handler.name:
state[handler.name] = e
for stmt in handler.body:
evaluate_ast(stmt, state, static_tools, custom_tools, authorized_imports)
break
if not matched:
raise e
else:
if try_node.orelse:
for stmt in try_node.orelse:
evaluate_ast(stmt, state, static_tools, custom_tools, authorized_imports)
finally:
if try_node.finalbody:
for stmt in try_node.finalbody:
evaluate_ast(stmt, state, static_tools, custom_tools, authorized_imports)
def evaluate_raise(
raise_node: ast.Raise,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> None:
if raise_node.exc is not None:
exc = evaluate_ast(raise_node.exc, state, static_tools, custom_tools, authorized_imports)
else:
exc = None
if raise_node.cause is not None:
cause = evaluate_ast(raise_node.cause, state, static_tools, custom_tools, authorized_imports)
else:
cause = None
if exc is not None:
if cause is not None:
raise exc from cause
else:
raise exc
else:
raise InterpreterError("Re-raise is not supported without an active exception")
def evaluate_assert(
assert_node: ast.Assert,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> None:
test_result = evaluate_ast(assert_node.test, state, static_tools, custom_tools, authorized_imports)
if not test_result:
if assert_node.msg:
msg = evaluate_ast(assert_node.msg, state, static_tools, custom_tools, authorized_imports)
raise AssertionError(msg)
else:
# Include the failing condition in the assertion message
test_code = ast.unparse(assert_node.test)
raise AssertionError(f"Assertion failed: {test_code}")
def evaluate_with(
with_node: ast.With,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> None:
contexts = []
for item in with_node.items:
context_expr = evaluate_ast(item.context_expr, state, static_tools, custom_tools, authorized_imports)
if item.optional_vars:
state[item.optional_vars.id] = context_expr.__enter__()
contexts.append(state[item.optional_vars.id])
else:
context_var = context_expr.__enter__()
contexts.append(context_var)
try:
for stmt in with_node.body:
evaluate_ast(stmt, state, static_tools, custom_tools, authorized_imports)
except Exception as e:
for context in reversed(contexts):
context.__exit__(type(e), e, e.__traceback__)
raise
else:
for context in reversed(contexts):
context.__exit__(None, None, None)
def get_safe_module(raw_module, dangerous_patterns, authorized_imports, visited=None):
"""Creates a safe copy of a module or returns the original if it's a function"""
# If it's a function or non-module object, return it directly
if not isinstance(raw_module, ModuleType):
return raw_module
# Handle circular references: Initialize visited set for the first call
if visited is None:
visited = set()
module_id = id(raw_module)
if module_id in visited:
return raw_module # Return original for circular refs
visited.add(module_id)
# Create new module for actual modules
safe_module = ModuleType(raw_module.__name__)
# Copy all attributes by reference, recursively checking modules
for attr_name in dir(raw_module):
# Skip dangerous patterns at any level
if any(
pattern in raw_module.__name__.split(".") + [attr_name] and pattern not in authorized_imports
for pattern in dangerous_patterns
):
logger.info(f"Skipping dangerous attribute {raw_module.__name__}.{attr_name}")
continue
try:
attr_value = getattr(raw_module, attr_name)
except ImportError as e:
# lazy / dynamic loading module -> INFO log and skip
logger.info(
f"Skipping import error while copying {raw_module.__name__}.{attr_name}: {type(e).__name__} - {e}"
)
continue
# Recursively process nested modules, passing visited set
if isinstance(attr_value, ModuleType):
attr_value = get_safe_module(attr_value, dangerous_patterns, authorized_imports, visited=visited)
setattr(safe_module, attr_name, attr_value)
return safe_module
def import_modules(expression, state, authorized_imports):
dangerous_patterns = (
"_os",
"os",
"subprocess",
"_subprocess",
"pty",
"system",
"popen",
"spawn",
"shutil",
"sys",
"pathlib",
"io",
"socket",
"compile",
"eval",
"exec",
"multiprocessing",
)
def check_module_authorized(module_name):
if "*" in authorized_imports:
return True
else:
module_path = module_name.split(".")
if any([module in dangerous_patterns and module not in authorized_imports for module in module_path]):
return False
module_subpaths = [".".join(module_path[:i]) for i in range(1, len(module_path) + 1)]
return any(subpath in authorized_imports for subpath in module_subpaths)
if isinstance(expression, ast.Import):
for alias in expression.names:
if check_module_authorized(alias.name):
raw_module = import_module(alias.name)
state[alias.asname or alias.name] = get_safe_module(raw_module, dangerous_patterns, authorized_imports)
else:
raise InterpreterError(
f"Import of {alias.name} is not allowed. Authorized imports are: {str(authorized_imports)}"
)
return None
elif isinstance(expression, ast.ImportFrom):
if check_module_authorized(expression.module):
raw_module = __import__(expression.module, fromlist=[alias.name for alias in expression.names])
module = get_safe_module(raw_module, dangerous_patterns, authorized_imports)
if expression.names[0].name == "*": # Handle "from module import *"
if hasattr(module, "__all__"): # If module has __all__, import only those names
for name in module.__all__:
state[name] = getattr(module, name)
else: # If no __all__, import all public names (those not starting with '_')
for name in dir(module):
if not name.startswith("_"):
state[name] = getattr(module, name)
else: # regular from imports
for alias in expression.names:
if hasattr(module, alias.name):
state[alias.asname or alias.name] = getattr(module, alias.name)
else:
raise InterpreterError(f"Module {expression.module} has no attribute {alias.name}")
else:
raise InterpreterError(
f"Import from {expression.module} is not allowed. Authorized imports are: {str(authorized_imports)}"
)
return None
def evaluate_dictcomp(
dictcomp: ast.DictComp,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> Dict[Any, Any]:
result = {}
for gen in dictcomp.generators:
iter_value = evaluate_ast(gen.iter, state, static_tools, custom_tools, authorized_imports)
for value in iter_value:
new_state = state.copy()
set_value(
gen.target,
value,
new_state,
static_tools,
custom_tools,
authorized_imports,
)
if all(
evaluate_ast(if_clause, new_state, static_tools, custom_tools, authorized_imports)
for if_clause in gen.ifs
):
key = evaluate_ast(
dictcomp.key,
new_state,
static_tools,
custom_tools,
authorized_imports,
)
val = evaluate_ast(
dictcomp.value,
new_state,
static_tools,
custom_tools,
authorized_imports,
)
result[key] = val
return result
def evaluate_delete(
delete_node: ast.Delete,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str],
) -> None:
"""
Evaluate a delete statement (del x, del x[y]).
Args:
delete_node: The AST Delete node to evaluate
state: The current state dictionary
static_tools: Dictionary of static tools
custom_tools: Dictionary of custom tools
authorized_imports: List of authorized imports
"""
for target in delete_node.targets:
if isinstance(target, ast.Name):
# Handle simple variable deletion (del x)
if target.id in state:
del state[target.id]
else:
raise InterpreterError(f"Cannot delete name '{target.id}': name is not defined")
elif isinstance(target, ast.Subscript):
# Handle index/key deletion (del x[y])
obj = evaluate_ast(target.value, state, static_tools, custom_tools, authorized_imports)
index = evaluate_ast(target.slice, state, static_tools, custom_tools, authorized_imports)
try:
del obj[index]
except (TypeError, KeyError, IndexError) as e:
raise InterpreterError(f"Cannot delete index/key: {str(e)}")
else:
raise InterpreterError(f"Deletion of {type(target).__name__} targets is not supported")
def evaluate_ast(
expression: ast.AST,
state: Dict[str, Any],
static_tools: Dict[str, Callable],
custom_tools: Dict[str, Callable],
authorized_imports: List[str] = BASE_BUILTIN_MODULES,
):
"""
Evaluate an abstract syntax tree using the content of the variables stored in a state and only evaluating a given
set of functions.
This function will recurse through the nodes of the tree provided.
Args:
expression (`ast.AST`):
The code to evaluate, as an abstract syntax tree.
state (`Dict[str, Any]`):
A dictionary mapping variable names to values. The `state` is updated if need be when the evaluation
encounters assignments.
static_tools (`Dict[str, Callable]`):
Functions that may be called during the evaluation. Trying to change one of these static_tools will raise an error.
custom_tools (`Dict[str, Callable]`):
Functions that may be called during the evaluation. These custom tools can be overwritten.
authorized_imports (`List[str]`):
The list of modules that can be imported by the code. By default, only a few safe modules are allowed.
If it contains "*", it will authorize any import. Use this at your own risk!
"""
if state["_operations_count"] >= MAX_OPERATIONS:
raise InterpreterError(
f"Reached the max number of operations of {MAX_OPERATIONS}. Maybe there is an infinite loop somewhere in the code, or you're just asking too many calculations."
)
state["_operations_count"] += 1
if isinstance(expression, ast.Assign):
# Assignment -> we evaluate the assignment which should update the state
# We return the variable assigned as it may be used to determine the final result.
return evaluate_assign(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.AugAssign):
return evaluate_augassign(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.Call):
# Function call -> we return the value of the function call
return evaluate_call(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.Constant):
# Constant -> just return the value
return expression.value
elif isinstance(expression, ast.Tuple):
return tuple(
evaluate_ast(elt, state, static_tools, custom_tools, authorized_imports) for elt in expression.elts
)
elif isinstance(expression, (ast.ListComp, ast.GeneratorExp)):
return evaluate_listcomp(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.UnaryOp):
return evaluate_unaryop(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.Starred):
return evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.BoolOp):
# Boolean operation -> evaluate the operation
return evaluate_boolop(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.Break):
raise BreakException()
elif isinstance(expression, ast.Continue):
raise ContinueException()
elif isinstance(expression, ast.BinOp):
# Binary operation -> execute operation
return evaluate_binop(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.Compare):
# Comparison -> evaluate the comparison
return evaluate_condition(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.Lambda):
return evaluate_lambda(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.FunctionDef):
return evaluate_function_def(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.Dict):
# Dict -> evaluate all keys and values
keys = [evaluate_ast(k, state, static_tools, custom_tools, authorized_imports) for k in expression.keys]
values = [evaluate_ast(v, state, static_tools, custom_tools, authorized_imports) for v in expression.values]
return dict(zip(keys, values))
elif isinstance(expression, ast.Expr):
# Expression -> evaluate the content
return evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.For):
# For loop -> execute the loop
return evaluate_for(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.FormattedValue):
# Formatted value (part of f-string) -> evaluate the content and return
return evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.If):
# If -> execute the right branch
return evaluate_if(expression, state, static_tools, custom_tools, authorized_imports)
elif hasattr(ast, "Index") and isinstance(expression, ast.Index):
return evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.JoinedStr):
return "".join(
[str(evaluate_ast(v, state, static_tools, custom_tools, authorized_imports)) for v in expression.values]
)
elif isinstance(expression, ast.List):
# List -> evaluate all elements
return [evaluate_ast(elt, state, static_tools, custom_tools, authorized_imports) for elt in expression.elts]
elif isinstance(expression, ast.Name):
# Name -> pick up the value in the state
return evaluate_name(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.Subscript):
# Subscript -> return the value of the indexing
return evaluate_subscript(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.IfExp):
test_val = evaluate_ast(expression.test, state, static_tools, custom_tools, authorized_imports)
if test_val:
return evaluate_ast(expression.body, state, static_tools, custom_tools, authorized_imports)
else:
return evaluate_ast(expression.orelse, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.Attribute):
value = evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports)
return getattr(value, expression.attr)
elif isinstance(expression, ast.Slice):
return slice(
evaluate_ast(expression.lower, state, static_tools, custom_tools, authorized_imports)
if expression.lower is not None
else None,
evaluate_ast(expression.upper, state, static_tools, custom_tools, authorized_imports)
if expression.upper is not None
else None,
evaluate_ast(expression.step, state, static_tools, custom_tools, authorized_imports)
if expression.step is not None
else None,
)
elif isinstance(expression, ast.DictComp):
return evaluate_dictcomp(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.While):
return evaluate_while(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, (ast.Import, ast.ImportFrom)):
return import_modules(expression, state, authorized_imports)
elif isinstance(expression, ast.ClassDef):
return evaluate_class_def(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.Try):
return evaluate_try(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.Raise):
return evaluate_raise(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.Assert):
return evaluate_assert(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.With):
return evaluate_with(expression, state, static_tools, custom_tools, authorized_imports)
elif isinstance(expression, ast.Set):
return {evaluate_ast(elt, state, static_tools, custom_tools, authorized_imports) for elt in expression.elts}
elif isinstance(expression, ast.Return):
raise ReturnException(
evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports)
if expression.value
else None
)
elif isinstance(expression, ast.Pass):
return None
elif isinstance(expression, ast.Delete):
return evaluate_delete(expression, state, static_tools, custom_tools, authorized_imports)
else:
# For now we refuse anything else. Let's add things as we need them.
raise InterpreterError(f"{expression.__class__.__name__} is not supported.")
class FinalAnswerException(Exception):
def __init__(self, value):
self.value = value
def evaluate_python_code(
code: str,
static_tools: Optional[Dict[str, Callable]] = None,
custom_tools: Optional[Dict[str, Callable]] = None,
state: Optional[Dict[str, Any]] = None,
authorized_imports: List[str] = BASE_BUILTIN_MODULES,
max_print_outputs_length: int = DEFAULT_MAX_LEN_OUTPUT,
):
"""
Evaluate a python expression using the content of the variables stored in a state and only evaluating a given set
of functions.
This function will recurse through the nodes of the tree provided.
Args:
code (`str`):
The code to evaluate.
static_tools (`Dict[str, Callable]`):
The functions that may be called during the evaluation. These can also be agents in a multiagent setting.
These tools cannot be overwritten in the code: any assignment to their name will raise an error.
custom_tools (`Dict[str, Callable]`):
The functions that may be called during the evaluation.
These tools can be overwritten in the code: any assignment to their name will overwrite them.
state (`Dict[str, Any]`):
A dictionary mapping variable names to values. The `state` should contain the initial inputs but will be
updated by this function to contain all variables as they are evaluated.
The print outputs will be stored in the state under the key "_print_outputs".
authorized_imports (`List[str]`):
The list of modules that can be imported by the code. If it contains "*", any import is authorized.
max_print_outputs_length (`int`):
Maximum number of characters of print output kept before truncation.
"""
try:
expression = ast.parse(code)
except SyntaxError as e:
raise InterpreterError(
f"Code parsing failed on line {e.lineno} due to: {type(e).__name__}\n"
f"{e.text}"
f"{' ' * (e.offset or 0)}^\n"
f"Error: {str(e)}"
)
if state is None:
state = {}
static_tools = static_tools.copy() if static_tools is not None else {}
custom_tools = custom_tools if custom_tools is not None else {}
result = None
state["_print_outputs"] = PrintContainer()
state["_operations_count"] = 0
def final_answer(value):
raise FinalAnswerException(value)
static_tools["final_answer"] = final_answer
try:
for node in expression.body:
result = evaluate_ast(node, state, static_tools, custom_tools, authorized_imports)
state["_print_outputs"].value = truncate_content(
str(state["_print_outputs"]), max_length=max_print_outputs_length
)
is_final_answer = False
return result, is_final_answer
except FinalAnswerException as e:
state["_print_outputs"].value = truncate_content(
str(state["_print_outputs"]), max_length=max_print_outputs_length
)
is_final_answer = True
return e.value, is_final_answer
except Exception as e:
exception_type = type(e).__name__
state["_print_outputs"].value = truncate_content(
str(state["_print_outputs"]), max_length=max_print_outputs_length
)
raise InterpreterError(
f"Code execution failed at line '{ast.get_source_segment(code, node)}' due to: {exception_type}:{str(e)}"
)
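# Usage sketch (illustrative only, not part of the original module):
#
#     state = {}
#     result, is_final_answer = evaluate_python_code(
#         "x = 2\ny = x ** 3\nprint(y)\ny",
#         static_tools=BASE_PYTHON_TOOLS.copy(),
#         state=state,
#     )
#     # result == 8, is_final_answer is False,
#     # and str(state["_print_outputs"]) == "8\n" (assuming the output is short enough not to be truncated).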
class LocalPythonInterpreter:
def __init__(
self,
additional_authorized_imports: List[str],
tools: Dict,
max_print_outputs_length: Optional[int] = None,
):
self.custom_tools = {}
self.state = {}
self.max_print_outputs_length = max_print_outputs_length
if max_print_outputs_length is None:
self.max_print_outputs_length = DEFAULT_MAX_LEN_OUTPUT
self.additional_authorized_imports = additional_authorized_imports
self.authorized_imports = list(set(BASE_BUILTIN_MODULES) | set(self.additional_authorized_imports))
# Add base trusted tools to list
self.static_tools = {
**tools,
**BASE_PYTHON_TOOLS.copy(),
}
# TODO: assert self.authorized imports are all installed locally
def __call__(self, code_action: str, additional_variables: Dict) -> Tuple[Any, str, bool]:
self.state.update(additional_variables)
output, is_final_answer = evaluate_python_code(
code_action,
static_tools=self.static_tools,
custom_tools=self.custom_tools,
state=self.state,
authorized_imports=self.authorized_imports,
max_print_outputs_length=self.max_print_outputs_length,
)
logs = str(self.state["_print_outputs"])
return output, logs, is_final_answer
__all__ = ["evaluate_python_code", "LocalPythonInterpreter"]
| smolagents/src/smolagents/local_python_executor.py/0 | {
"file_path": "smolagents/src/smolagents/local_python_executor.py",
"repo_id": "smolagents",
"token_count": 22987
} |
from unittest.mock import MagicMock, patch
from smolagents.e2b_executor import E2BExecutor
class TestE2BExecutor:
def test_e2b_executor_instantiation(self):
logger = MagicMock()
with patch("e2b_code_interpreter.Sandbox") as mock_sandbox:
mock_sandbox.return_value.commands.run.return_value.error = None
mock_sandbox.return_value.run_code.return_value.error = None
executor = E2BExecutor(additional_imports=[], tools=[], logger=logger)
assert isinstance(executor, E2BExecutor)
assert executor.logger == logger
assert executor.final_answer is False
assert executor.custom_tools == {}
assert executor.final_answer_pattern.pattern == r"final_answer\((.*?)\)"
assert executor.sbx == mock_sandbox.return_value
| smolagents/tests/test_e2b_executor.py/0 | {
"file_path": "smolagents/tests/test_e2b_executor.py",
"repo_id": "smolagents",
"token_count": 333
} |
use std::fs;
fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("cargo:rerun-if-changed=../../proto/");
fs::create_dir_all("src/v2/pb").unwrap_or(());
let mut config = prost_build::Config::new();
config.protoc_arg("--experimental_allow_proto3_optional");
tonic_build::configure()
.build_client(true)
.build_server(false)
.out_dir("src/v2/pb")
.include_file("mod.rs")
.compile_with_config(config, &["../../proto/generate.proto"], &["../../proto"])
.map_err(|e| match e.kind(){
std::io::ErrorKind::NotFound => {panic!("`protoc` not found, install libprotoc")},
std::io::ErrorKind::Other => {panic!("`protoc` version unsupported, upgrade protoc: https://github.com/protocolbuffers/protobuf/releases")},
e => {e}
}).unwrap_or_else(|e| panic!("protobuf compilation failed: {e}"));
fs::create_dir_all("src/v3/pb").unwrap_or(());
let mut config = prost_build::Config::new();
config.protoc_arg("--experimental_allow_proto3_optional");
tonic_build::configure()
.build_client(true)
.build_server(false)
.out_dir("src/v3/pb")
.include_file("mod.rs")
.compile_with_config(config, &["../../proto/v3/generate.proto"], &["../../proto"])
.unwrap_or_else(|e| panic!("protobuf compilation failed: {e}"));
Ok(())
}
| text-generation-inference/backends/client/build.rs/0 | {
"file_path": "text-generation-inference/backends/client/build.rs",
"repo_id": "text-generation-inference",
"token_count": 624
} |
set(TRT_INCLUDE_DIR ${TGI_TRTLLM_BACKEND_TRT_INCLUDE_DIR})
set(TRT_LIB_DIR ${TGI_TRTLLM_BACKEND_TRT_LIB_DIR})
set(USE_CXX11_ABI ON)
set(BUILD_PYT OFF)
set(BUILD_PYBIND OFF)
set(BUILD_MICRO_BENCHMARKS OFF)
set(BUILD_BENCHMARKS OFF)
set(BUILD_TESTS OFF)
set(CMAKE_CUDA_ARCHITECTURES ${TGI_TRTLLM_BACKEND_TARGET_CUDA_ARCH_LIST})
message(STATUS "Building for CUDA Architectures: ${CMAKE_CUDA_ARCHITECTURES}")
set(ENABLE_UCX OFF)
if (${CMAKE_BUILD_TYPE} STREQUAL "Debug")
set(FAST_BUILD ON)
set(NVTX_DISABLE ON)
set(INDEX_RANGE_CHECK ON)
else ()
set(FAST_BUILD OFF)
set(FAST_MATH ON)
set(NVTX_DISABLE OFF)
set(INDEX_RANGE_CHECK OFF)
endif ()
find_package(Python3 REQUIRED Interpreter)
fetchcontent_declare(
trtllm
GIT_REPOSITORY https://github.com/nvidia/TensorRT-LLM.git
GIT_TAG v0.17.0
GIT_SHALLOW ON
DOWNLOAD_EXTRACT_TIMESTAMP
)
fetchcontent_makeavailable(trtllm)
message(STATUS "Found TensorRT-LLM: ${trtllm_SOURCE_DIR}")
execute_process(COMMAND git lfs install WORKING_DIRECTORY "${trtllm_SOURCE_DIR}/")
execute_process(COMMAND git lfs pull WORKING_DIRECTORY "${trtllm_SOURCE_DIR}/")
# TRTLLM use a JIT based *precompiled* library to generate some specific kernels, we are generating the path to this one here
set(TRTLLM_NVRTC_LIBRARY_NAME "${CMAKE_SHARED_LIBRARY_PREFIX}tensorrt_llm_nvrtc_wrapper${CMAKE_SHARED_LIBRARY_SUFFIX}" CACHE INTERNAL "nvrtc wrapper library name")
set(TRTLLM_NVRTC_WRAPPER_LIBRARY_PATH "${trtllm_SOURCE_DIR}/cpp/tensorrt_llm/kernels/decoderMaskedMultiheadAttention/decoderXQAImplJIT/nvrtcWrapper/${CMAKE_LIBRARY_ARCHITECTURE}/${TRTLLM_NVRTC_LIBRARY_NAME}"
CACHE INTERNAL "nvrtc wrapper library path")
# The same Executor Static library
set(TRTLLM_EXECUTOR_STATIC_LIBRARY_NAME "${CMAKE_SHARED_LIBRARY_PREFIX}tensorrt_llm_executor_static${CMAKE_STATIC_LIBRARY_SUFFIX}" CACHE INTERNAL "executor_static library name")
set(TRTLLM_EXECUTOR_STATIC_LIBRARY_PATH "${trtllm_SOURCE_DIR}/cpp/tensorrt_llm/executor/${CMAKE_LIBRARY_ARCHITECTURE}/${TRTLLM_EXECUTOR_STATIC_LIBRARY_NAME}" CACHE INTERNAL "executor_static library path")
| text-generation-inference/backends/trtllm/cmake/trtllm.cmake/0 | {
"file_path": "text-generation-inference/backends/trtllm/cmake/trtllm.cmake",
"repo_id": "text-generation-inference",
"token_count": 976
} |
mod backend;
pub mod block_allocator;
mod client;
mod queue;
pub mod radix;
use crate::client::{ClientError, ShardedClient};
pub(crate) use backend::BackendV3;
use serde::Serialize;
use thiserror::Error;
use utoipa::ToSchema;
#[derive(Clone, Debug, Serialize, ToSchema)]
pub struct BackendInfo {
/// Mandatory
#[schema(example = "cuda")]
pub model_device_type: String,
#[schema(example = "torch.float16")]
pub model_dtype: String,
/// Backend parameters
#[schema(example = "1")]
pub speculate: usize,
#[schema(example = "1.2")]
pub waiting_served_ratio: f32,
#[schema(example = "32000")]
pub max_batch_total_tokens: u32,
#[schema(example = "20")]
pub max_waiting_tokens: usize,
#[schema(nullable = true, example = "null")]
pub max_batch_size: Option<usize>,
#[schema(example = "false")]
pub support_chunking: bool,
#[schema(example = "false")]
pub prefix_caching: bool,
#[schema(example = "flashinfer")]
pub attention_impl: String,
#[schema(example = "1")]
pub block_size: u32,
#[schema(example = "30000")]
pub max_input_tokens: usize,
#[schema(example = "32000")]
pub max_total_tokens: usize,
}
#[allow(clippy::too_many_arguments)]
pub async fn connect_backend(
max_input_tokens: Option<usize>,
max_total_tokens: Option<usize>,
master_shard_uds_path: String,
waiting_served_ratio: f32,
max_batch_prefill_tokens: u32,
max_batch_total_tokens: Option<u32>,
max_waiting_tokens: usize,
max_batch_size: Option<usize>,
) -> Result<(BackendV3, BackendInfo), V3Error> {
// Helper function
let check_max_batch_total_tokens = |(
max_supported_batch_total_tokens,
shard_max_input_tokens,
shard_max_total_tokens,
): (Option<u32>, u32, u32)|
-> Result<(u32, usize, usize), V3Error> {
if let Some(max_input_tokens) = max_input_tokens {
assert_eq!(max_input_tokens as u32, shard_max_input_tokens);
}
if let Some(max_total_tokens) = max_total_tokens {
assert_eq!(max_total_tokens as u32, shard_max_total_tokens);
}
match max_supported_batch_total_tokens {
// Older models do not support automatic max-batch-total-tokens
None => {
let max_batch_total_tokens = max_batch_total_tokens.unwrap_or(
16000
.max(shard_max_total_tokens)
.max(max_batch_prefill_tokens),
);
tracing::warn!("Model does not support automatic max batch total tokens");
Ok((
max_batch_total_tokens,
shard_max_input_tokens as usize,
shard_max_total_tokens as usize,
))
}
// Flash attention models return their max supported total tokens
Some(max_supported_batch_total_tokens) => {
// Warn if user added his own max-batch-total-tokens as we will ignore it
if max_batch_total_tokens.is_some() {
tracing::warn!(
"`--max-batch-total-tokens` is deprecated for Flash \
Attention models."
);
tracing::warn!(
"Inferred max batch total tokens: {max_supported_batch_total_tokens}"
);
}
if shard_max_total_tokens > max_supported_batch_total_tokens {
return Err(V3Error::NotEnoughMemory(shard_max_total_tokens as usize));
}
Ok((
max_supported_batch_total_tokens,
shard_max_input_tokens as usize,
shard_max_total_tokens as usize,
))
}
}
};
let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path)
.await
.map_err(V3Error::Connection)?;
// server is running on v3
// Clear the cache; useful if the webserver rebooted
sharded_client
.clear_cache(None)
.await
.map_err(V3Error::Cache)?;
// Get info from the shard
let shard_info = sharded_client.info().await.map_err(V3Error::Info)?;
// Warmup model
tracing::info!("Warming up model");
let answer = sharded_client
.warmup(
max_input_tokens.map(|p| p as u32),
max_batch_prefill_tokens,
max_total_tokens.map(|p| p as u32),
max_batch_size,
)
.await
.map_err(V3Error::Warmup)?;
let (max_batch_total_tokens, max_input_tokens, max_total_tokens) =
check_max_batch_total_tokens(answer)?;
tracing::info!("Setting max batch total tokens to {max_batch_total_tokens}");
metrics::gauge!("tgi_batch_max_total_tokens").set(max_batch_total_tokens);
let backend_info = BackendInfo {
waiting_served_ratio,
max_batch_total_tokens,
max_input_tokens,
max_total_tokens,
max_waiting_tokens,
max_batch_size,
model_device_type: shard_info.device_type.clone(),
model_dtype: shard_info.dtype.clone(),
speculate: shard_info.speculate as usize,
support_chunking: shard_info.support_chunking,
prefix_caching: shard_info.use_prefix_caching,
attention_impl: shard_info.attention_impl.clone(),
block_size: shard_info.block_size,
};
let backend = BackendV3::new(
sharded_client,
waiting_served_ratio,
max_batch_prefill_tokens,
max_batch_total_tokens,
max_waiting_tokens,
max_batch_size,
shard_info,
);
tracing::info!("Using backend V3");
Ok((backend, backend_info))
}
#[derive(Debug, Error)]
pub enum V3Error {
#[error("Unable to clear the Python model shards cache: {0}")]
Cache(ClientError),
#[error("Unable to connect to the Python model shards: {0}")]
Connection(ClientError),
#[error("Unable to get the Python model shards info: {0}")]
Info(ClientError),
#[error("Unable to warmup the Python model shards: {0}")]
Warmup(ClientError),
#[error("Not enough memory to handle `max_total_tokens={0}`")]
NotEnoughMemory(usize),
}
| text-generation-inference/backends/v3/src/lib.rs/0 | {
"file_path": "text-generation-inference/backends/v3/src/lib.rs",
"repo_id": "text-generation-inference",
"token_count": 3087
} |
- sections:
- local: index
title: Text Generation Inference
- local: quicktour
title: Quick Tour
- local: supported_models
title: Supported Models
- local: installation_nvidia
title: Using TGI with Nvidia GPUs
- local: installation_amd
title: Using TGI with AMD GPUs
- local: installation_gaudi
title: Using TGI with Intel Gaudi
- local: installation_inferentia
title: Using TGI with AWS Inferentia
- local: installation_tpu
title: Using TGI with Google TPUs
- local: installation_intel
title: Using TGI with Intel GPUs
- local: installation
title: Installation from source
- local: multi_backend_support
title: Multi-backend support
- local: architecture
title: Internal Architecture
- local: usage_statistics
title: Usage Statistics
title: Getting started
- sections:
- local: basic_tutorials/consuming_tgi
title: Consuming TGI
- local: basic_tutorials/preparing_model
title: Preparing Model for Serving
- local: basic_tutorials/gated_model_access
title: Serving Private & Gated Models
- local: basic_tutorials/using_cli
title: Using TGI CLI
- local: basic_tutorials/non_core_models
title: Non-core Model Serving
- local: basic_tutorials/safety
title: Safety
- local: basic_tutorials/using_guidance
title: Using Guidance, JSON, tools
- local: basic_tutorials/visual_language_models
title: Visual Language Models
- local: basic_tutorials/monitoring
title: Monitoring TGI with Prometheus and Grafana
- local: basic_tutorials/train_medusa
title: Train Medusa
title: Tutorials
- sections:
- local: backends/trtllm
title: TensorRT-LLM
title: Backends
- sections:
- local: reference/launcher
title: All TGI CLI options
- local: reference/metrics
title: Exported Metrics
- local: reference/api_reference
title: API Reference
title: Reference
- sections:
- local: conceptual/chunking
title: V3 update, caching and chunking
- local: conceptual/streaming
title: Streaming
- local: conceptual/quantization
title: Quantization
- local: conceptual/tensor_parallelism
title: Tensor Parallelism
- local: conceptual/paged_attention
title: PagedAttention
- local: conceptual/safetensors
title: Safetensors
- local: conceptual/flash_attention
title: Flash Attention
- local: conceptual/speculation
title: Speculation (Medusa, ngram)
- local: conceptual/guidance
title: How Guidance Works (via outlines)
- local: conceptual/lora
title: LoRA (Low-Rank Adaptation)
- local: conceptual/external
title: External Resources
title: Conceptual Guides
| text-generation-inference/docs/source/_toctree.yml/0 | {
"file_path": "text-generation-inference/docs/source/_toctree.yml",
"repo_id": "text-generation-inference",
"token_count": 864
} |
# Guidance
## What is Guidance?
Guidance is a feature that allows users to constrain the generation of a large language model with a specified grammar. This feature is particularly useful when you want to generate text that follows a specific structure, uses a specific set of words, or produces output in a specific format. A prominent example is a JSON grammar, where the model is forced to output valid JSON.
## How is it used?
Guidance can be implemented in many ways, and the community is always finding new ways to use it. Here are some examples of how you can use guidance.
Technically, guidance can be used to generate:
- a specific JSON object
- a function signature
- typed output like a list of integers
However, these use cases span a wide range of applications, such as:
- extracting structured data from unstructured text
- summarizing text into a specific format
- limiting output to specific classes of words (acting as an LLM-powered classifier)
- generating the input to specific APIs or services
- providing reliable and consistent output for downstream tasks
- extracting data from multimodal inputs
## How does it work?
Diving into the details, guidance is enabled by including a grammar with a generation request. The grammar is compiled and then used to constrain the tokens the model is allowed to choose.
This process can be broken down into the following steps:
1. A request is sent to the backend, where it is processed and placed in a batch. Processing includes compiling the grammar into a finite state machine and initializing a grammar state.
<div class="flex justify-center">
<img
class="block dark:hidden"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/request-to-batch.gif"
/>
<img
class="hidden dark:block"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/request-to-batch-dark.gif"
/>
</div>
2. The model does a forward pass over the batch. This returns probabilities for each token in the vocabulary for each request in the batch.
3. The process of choosing one of those tokens is called `sampling`: the model samples from the probability distribution to choose the next token. In TGI, all of the steps before sampling are called `processors`. Grammars are applied as a processor that masks out tokens that are not allowed by the grammar.
<div class="flex justify-center">
<img
class="block dark:hidden"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/logit-grammar-mask.gif"
/>
<img
class="hidden dark:block"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/logit-grammar-mask-dark.gif"
/>
</div>
4. The grammar mask is applied and the model samples from the remaining tokens. Once a token is chosen, the grammar state is updated with the new token to prepare it for the next pass.
<div class="flex justify-center">
<img
class="block dark:hidden"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/sample-logits.gif"
/>
<img
class="hidden dark:block"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/sample-logits-dark.gif"
/>
</div>
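To make the masking step concrete, here is a minimal, self-contained sketch of how a grammar processor can constrain sampling. This is not TGI's actual implementation; the `apply_grammar_mask` helper and the hard-coded `allowed_token_ids` are assumptions for illustration only — in TGI, the allowed tokens come from the compiled finite state machine's current grammar state.

```python
import torch

def apply_grammar_mask(logits: torch.Tensor, allowed_token_ids: list) -> torch.Tensor:
    # Forbid every token by default by adding -inf to its logit...
    mask = torch.full_like(logits, float("-inf"))
    # ...then re-enable only the tokens the grammar state currently allows.
    mask[allowed_token_ids] = 0.0
    return logits + mask

vocab_size = 8
logits = torch.randn(vocab_size)  # stand-in for one request's logits after a forward pass
# Pretend the grammar state says only tokens 2 and 5 are valid continuations.
masked_logits = apply_grammar_mask(logits, allowed_token_ids=[2, 5])
probs = torch.softmax(masked_logits, dim=-1)  # disallowed tokens end up with probability 0
next_token = torch.multinomial(probs, num_samples=1)  # always samples token 2 or 5
```

After each sampled token, the grammar state advances through the finite state machine, so the set of allowed tokens is recomputed for the next pass.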
## How to use Guidance?
There are two main ways to use guidance; you can either use the `/generate` endpoint with a grammar or use the `/chat/completion` endpoint with tools.
Under the hood, tools are a special case of grammars that allow the model to choose one or none of the provided tools.
Please refer to [using guidance](../basic_tutorials/using_guidance) for more examples and details on how to use guidance in Python, JavaScript, and cURL.
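As a rough sketch of the first approach, the request below sends a JSON schema as the grammar to a TGI server assumed to be running locally on port 8080; the schema, prompt, and port are made up for illustration, and the exact payload options are covered in the tutorial linked above.

```python
import requests

# Hypothetical schema describing the structure we want the model to produce.
schema = {
    "properties": {
        "name": {"type": "string"},
        "age": {"type": "integer"},
    },
    "required": ["name", "age"],
}

payload = {
    # Including the schema in the prompt (see the tips below) helps the model follow it.
    "inputs": (
        "Please use the following JSON schema to generate the output: "
        f"{schema}\nExtract the person described in: 'Ada is 36 years old.'"
    ),
    "parameters": {
        "max_new_tokens": 128,
        "grammar": {"type": "json", "value": schema},
    },
}

response = requests.post("http://localhost:8080/generate", json=payload)
print(response.json()["generated_text"])  # output is constrained to valid JSON
```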
### Getting the most out of guidance
Depending on how you are using guidance, you may want to make use of different features. Here are some tips to get the most out of guidance:
- If you are using the `/generate` endpoint with a `grammar`, it is recommended to include the grammar in the prompt, prefixed by something like `Please use the following JSON schema to generate the output:`. This helps the model understand the context of the grammar and generate the output accordingly.
- If you are getting a response with many repeated tokens, use the `frequency_penalty` or `repetition_penalty` parameters to reduce the number of repeated tokens in the output.
| text-generation-inference/docs/source/conceptual/guidance.md/0 | {
"file_path": "text-generation-inference/docs/source/conceptual/guidance.md",
"repo_id": "text-generation-inference",
"token_count": 1237
} |
# Multi-backend support
TGI (Text Generation Inference) offers flexibility by supporting multiple backends for serving large language models (LLMs).
With multi-backend support, you can choose the backend that best suits your needs,
whether you prioritize performance, ease of use, or compatibility with specific hardware. API interaction with
TGI remains consistent across backends, allowing you to switch between them seamlessly.
**Supported backends:**
* **TGI CUDA backend**: This high-performance backend is optimized for NVIDIA GPUs and serves as the default option
within TGI. Developed in-house, it boasts numerous optimizations and is used in production by various projects, including those by Hugging Face.
* **[TGI TRTLLM backend](./backends/trtllm)**: This backend leverages NVIDIA's TensorRT library to accelerate LLM inference.
It utilizes specialized optimizations and custom kernels for enhanced performance.
However, it requires a model-specific compilation step for each GPU architecture.
| text-generation-inference/docs/source/multi_backend_support.md/0 | {
"file_path": "text-generation-inference/docs/source/multi_backend_support.md",
"repo_id": "text-generation-inference",
"token_count": 223
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 29946,
"logprob": -1.4765625,
"special": false,
"text": "4"
},
{
"id": 29906,
"logprob": -0.9199219,
"special": false,
"text": "2"
},
{
"id": 29889,
"logprob": 0.0,
"special": false,
"text": "."
},
{
"id": 29896,
"logprob": -1.1367188,
"special": false,
"text": "1"
},
{
"id": 29889,
"logprob": -1.4648438,
"special": false,
"text": "."
},
{
"id": 29896,
"logprob": -0.40722656,
"special": false,
"text": "1"
},
{
"id": 29889,
"logprob": -0.17419434,
"special": false,
"text": "."
},
{
"id": 29896,
"logprob": -0.20251465,
"special": false,
"text": "1"
},
{
"id": 29900,
"logprob": -1.5527344,
"special": false,
"text": "0"
},
{
"id": 29896,
"logprob": -1.3710938,
"special": false,
"text": "1"
}
],
"top_tokens": null
},
"generated_text": "42.1.1.101"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar_regex.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar_regex.json",
"repo_id": "text-generation-inference",
"token_count": 860
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": 0,
"tokens": [
{
"id": 1313,
"logprob": -2.3613281,
"special": false,
"text": "It"
},
{
"id": 3969,
"logprob": -0.7285156,
"special": false,
"text": " seems"
},
{
"id": 298,
"logprob": -1.3466797,
"special": false,
"text": " to"
},
{
"id": 528,
"logprob": 0.0,
"special": false,
"text": " me"
},
{
"id": 28725,
"logprob": -1.6757812,
"special": false,
"text": ","
},
{
"id": 369,
"logprob": 0.0,
"special": false,
"text": " that"
},
{
"id": 513,
"logprob": -1.1269531,
"special": false,
"text": " if"
},
{
"id": 368,
"logprob": 0.0,
"special": false,
"text": " you"
},
{
"id": 28742,
"logprob": -2.4921875,
"special": false,
"text": "'"
},
{
"id": 267,
"logprob": 0.0,
"special": false,
"text": "re"
}
],
"top_tokens": null
},
"generated_text": "What is gradient descent?\n\nIt seems to me, that if you're"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_mixtral/test_flash_mixtral_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_mixtral/test_flash_mixtral_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 858
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "stop_sequence",
"generated_tokens": 6,
"prefill": [],
"seed": 0,
"tokens": [
{
"id": 284,
"logprob": -0.28955078,
"special": false,
"text": " to"
},
{
"id": 3758,
"logprob": -0.7739258,
"special": false,
"text": " send"
},
{
"id": 1366,
"logprob": -0.85253906,
"special": false,
"text": " data"
},
{
"id": 625,
"logprob": -0.8984375,
"special": false,
"text": " over"
},
{
"id": 257,
"logprob": -1.0830078,
"special": false,
"text": " a"
},
{
"id": 3127,
"logprob": -1.9404297,
"special": false,
"text": " network"
}
],
"top_tokens": null
},
"generated_text": "Test request to send data over a network"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 568
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": 0,
"tokens": [
{
"id": 288,
"logprob": -0.2854004,
"special": false,
"text": "ing"
},
{
"id": 264,
"logprob": -0.38061523,
"special": false,
"text": " a"
},
{
"id": 633,
"logprob": -0.09301758,
"special": false,
"text": " new"
},
{
"id": 4480,
"logprob": -0.26782227,
"special": false,
"text": " feature"
},
{
"id": 297,
"logprob": -0.8510742,
"special": false,
"text": " in"
},
{
"id": 272,
"logprob": -0.13464355,
"special": false,
"text": " the"
},
{
"id": 2039,
"logprob": 0.0,
"special": false,
"text": " game"
},
{
"id": 28723,
"logprob": -0.89990234,
"special": false,
"text": "."
},
{
"id": 13,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 13,
"logprob": -0.10632324,
"special": false,
"text": "\n"
}
],
"top_tokens": null
},
"generated_text": "Test requesting a new feature in the game.\n\n"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_next_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_next_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 860
} |
import pytest
@pytest.fixture(scope="module")
def compressed_tensors_w8an_handle(launcher):
with launcher(
"neuralmagic/Llama-3.2-1B-Instruct-FP8",
num_shard=2,
quantize="compressed-tensors",
) as handle:
yield handle
@pytest.fixture(scope="module")
async def compressed_tensors_w8an(compressed_tensors_w8an_handle):
await compressed_tensors_w8an_handle.health(300)
return compressed_tensors_w8an_handle.client
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_compressed_tensors_w8an(compressed_tensors_w8an, response_snapshot):
response = await compressed_tensors_w8an.generate(
"What is deep learning?",
max_new_tokens=10,
decoder_input_details=True,
)
assert (
response.generated_text
== " Deep learning is a type of artificial intelligence (AI"
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
async def test_compressed_tensors_w8an_all_params(
compressed_tensors_w8an, response_snapshot
):
response = await compressed_tensors_w8an.generate(
"What is deep learning",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert (
response.generated_text
== "What is deep learning?\nDeep learning, also known as neural network or"
)
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_compressed_tensors_w8an_load(
compressed_tensors_w8an, generate_load, response_snapshot
):
responses = await generate_load(
compressed_tensors_w8an,
"What is deep learning?",
max_new_tokens=10,
n=4,
)
assert (
responses[0].generated_text
== " Deep learning is a type of artificial intelligence (AI"
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_compressed_tensors_w8an_fp.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_compressed_tensors_w8an_fp.py",
"repo_id": "text-generation-inference",
"token_count": 1000
} |
import pytest
@pytest.fixture(scope="module")
def flash_llama_fp8_kv_cache_handle(launcher):
with launcher(
"neuralmagic/Meta-Llama-3-8B-Instruct-FP8-KV",
num_shard=2,
kv_cache_dtype="fp8_e4m3fn",
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_fp8_kv_cache(flash_llama_fp8_kv_cache_handle):
await flash_llama_fp8_kv_cache_handle.health(300)
return flash_llama_fp8_kv_cache_handle.client
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_fp8_kv_cache(flash_llama_fp8_kv_cache, response_snapshot):
response = await flash_llama_fp8_kv_cache.generate(
"What is deep learning?", max_new_tokens=10, decoder_input_details=True
)
assert (
response.generated_text
== " Deep learning is a subset of machine learning that involves"
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_fp8_kv_cache_all_params(
flash_llama_fp8_kv_cache, response_snapshot
):
response = await flash_llama_fp8_kv_cache.generate(
"What is deep learning?",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_fp8_kv_cache_load(
flash_llama_fp8_kv_cache, generate_load, response_snapshot
):
responses = await generate_load(
flash_llama_fp8_kv_cache, "What is deep learning?", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert (
responses[0].generated_text
== " Deep learning is a subset of machine learning that involves"
)
assert all(
[r.generated_text == responses[0].generated_text for r in responses]
), f"Different messages : {[r.generated_text for r in responses]}"
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_llama_fp8_kv_cache.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_llama_fp8_kv_cache.py",
"repo_id": "text-generation-inference",
"token_count": 986
} |
import pytest
@pytest.fixture(scope="module")
def flash_phi35_moe_handle(launcher):
with launcher(
"microsoft/Phi-3.5-MoE-instruct",
num_shard=4,
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_phi35_moe(flash_phi35_moe_handle):
await flash_phi35_moe_handle.health(300)
return flash_phi35_moe_handle.client
@pytest.mark.asyncio
async def test_flash_phi35_moe(flash_phi35_moe, response_snapshot):
response = await flash_phi35_moe.generate(
"What is gradient descent?\n\n", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert (
response.generated_text
== "Gradient descent is an optimization algorithm commonly used in"
)
assert response == response_snapshot
@pytest.mark.asyncio
async def test_flash_phi35_moe_all_params(flash_phi35_moe, response_snapshot):
response = await flash_phi35_moe.generate(
"What is gradient descent?\n",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert (
response.generated_text
== "What is gradient descent?\nGradient Descent (GD) is an"
)
assert response == response_snapshot
@pytest.mark.asyncio
async def test_flash_phi35_moe_load(flash_phi35_moe, generate_load, response_snapshot):
responses = await generate_load(
flash_phi35_moe, "What is gradient descent?\n\n", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert responses[0].details.generated_tokens == 10
assert (
responses[0].generated_text
== "Gradient descent is an optimization algorithm commonly used in"
)
assert all(
[r.generated_text == responses[0].generated_text for r in responses]
), f"{[r.generated_text for r in responses]}"
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_phi35_moe.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_phi35_moe.py",
"repo_id": "text-generation-inference",
"token_count": 921
} |
import pytest
import asyncio
@pytest.fixture(scope="module")
def mllama_handle(launcher):
with launcher(
"meta-llama/Llama-3.2-11B-Vision-Instruct",
num_shard=2,
) as handle:
yield handle
@pytest.fixture(scope="module")
async def mllama(mllama_handle):
await mllama_handle.health(300)
return mllama_handle.client
@pytest.mark.asyncio
async def test_mllama_simpl(mllama, response_snapshot):
response = await mllama.chat(
max_tokens=10,
temperature=0.0,
messages=[
{
"role": "user",
"content": [
{
"type": "text",
"text": "Can you tell me a very short story based on the image?",
},
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/huggingface/text-generation-inference/main/integration-tests/images/chicken_on_money.png"
},
},
],
},
],
)
assert response.usage == {
"completion_tokens": 10,
"prompt_tokens": 50,
"total_tokens": 60,
}
assert (
response.choices[0].message.content == "In a small town, a chicken named Cluck"
)
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_mllama_load(mllama, generate_load, response_snapshot):
futures = [
mllama.chat(
max_tokens=10,
temperature=0.0,
messages=[
{
"role": "user",
"content": [
{
"type": "text",
"text": "Can you tell me a very short story based on the image?",
},
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/huggingface/text-generation-inference/main/integration-tests/images/chicken_on_money.png"
},
},
],
},
],
)
        # TODO with v3, 4 breaks here. Nothing accounts for the image VRAM
# because mllama is the only one doing its thing.
for i in range(2)
]
responses = await asyncio.gather(*futures)
generated_texts = [response.choices[0].message.content for response in responses]
# XXX: TODO: Fix this test.
assert generated_texts[0] == "In a small town, a chicken named Cluck"
assert len(generated_texts) == 2
    assert all(
        [text == generated_texts[0] for text in generated_texts]
    ), f"Different messages : {generated_texts}"
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_mllama.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_mllama.py",
"repo_id": "text-generation-inference",
"token_count": 1587
} |
pub fn get_cuda_capability() -> Option<(usize, usize)> {
use pyo3::prelude::*;
let py_get_capability = |py: Python| -> PyResult<(isize, isize)> {
let torch = py.import_bound("torch.cuda")?;
let get_device_capability = torch.getattr("get_device_capability")?;
get_device_capability.call0()?.extract()
};
match pyo3::Python::with_gil(py_get_capability) {
Ok((major, minor)) if major < 0 || minor < 0 => {
tracing::warn!("Ignoring negative GPU compute capabilities: {major}.{minor}");
None
}
Ok((major, minor)) => Some((major as usize, minor as usize)),
Err(err) => {
tracing::warn!("Cannot determine GPU compute capability: {}", err);
None
}
}
}
| text-generation-inference/launcher/src/gpu.rs/0 | {
"file_path": "text-generation-inference/launcher/src/gpu.rs",
"repo_id": "text-generation-inference",
"token_count": 350
} |
final: prev: {
# You can use this overlay to temporarily override packages for
# development. For permanent overrides, it's better to do this in
# our package flake:
#
# https://github.com/huggingface/text-generation-inference-nix
#
# Note that overriding packages that are in the transitive closure
# of many other packages (e.g. transformers) will require a large
# rebuild.
pythonPackagesExtensions = prev.pythonPackagesExtensions ++ [
(
python-self: python-super: with python-self; {
# Python package override example:
# transformers = python-super.transformers.overrideAttrs (
# _: _: {
# src = final.fetchFromGitHub {
# owner = "huggingface";
# repo = "transformers";
# rev = "2bd4d5897dc73e8b172832070a6f9e567a0df017";
# hash = "sha256-JOIpKH9ssDEfI2Tf15e0iPKtThJwQ9GxMvRAnm+M2Pg=";
# };
# }
# );
}
)
];
# Non-python package override example:
#
# ripgrep = prev.ripgrep.overrideAttrs (
# _: _: {
# src = final.fetchFromGitHub {
# owner = "BurntSushi";
# repo = "ripgrep";
# rev = "79cbe89deb1151e703f4d91b19af9cdcc128b765";
# hash = "sha256-JPTM2KNmGMb+/jOfK3X7OM1wnN+3TU35SJOIcqmp3mg=";
# };
# });
}
| text-generation-inference/nix/overlay.nix/0 | {
"file_path": "text-generation-inference/nix/overlay.nix",
"repo_id": "text-generation-inference",
"token_count": 633
} |
use crate::config::Config;
use clap::ValueEnum;
use csv::ReaderBuilder;
use reqwest::header::HeaderMap;
use serde::Serialize;
use std::{
fs::File,
io::{self, BufRead},
path::Path,
process::Command,
time::Duration,
};
use uuid::Uuid;
const TELEMETRY_URL: &str = "https://huggingface.co/api/telemetry/tgi";
#[derive(Copy, Clone, Debug, Serialize, ValueEnum)]
pub enum UsageStatsLevel {
On,
NoStack,
Off,
}
#[derive(Debug, Clone, Serialize)]
pub struct UserAgent {
pub uid: String,
pub args: Args,
pub env: Env,
}
impl UserAgent {
pub fn new(reduced_args: Args) -> Self {
Self {
uid: Uuid::new_v4().to_string(),
args: reduced_args,
env: Env::new(),
}
}
}
#[derive(Serialize, Debug)]
pub enum EventType {
Start,
Stop,
Error,
Ping,
}
#[derive(Debug, Serialize)]
pub struct UsageStatsEvent {
user_agent: UserAgent,
event_type: EventType,
#[serde(skip_serializing_if = "Option::is_none")]
error_reason: Option<String>,
}
impl UsageStatsEvent {
pub fn new(user_agent: UserAgent, event_type: EventType, error_reason: Option<String>) -> Self {
Self {
user_agent,
event_type,
error_reason,
}
}
pub async fn send(&self) {
let mut headers = HeaderMap::new();
headers.insert("Content-Type", "application/json".parse().unwrap());
let body = serde_json::to_string(&self).unwrap();
let client = reqwest::Client::new();
let _ = client
.post(TELEMETRY_URL)
.headers(headers)
.body(body)
.timeout(Duration::from_secs(10))
.send()
.await;
}
}
#[derive(Debug, Clone, Serialize)]
pub struct Args {
model_config: Option<Config>,
tokenizer_class: Option<String>,
max_concurrent_requests: usize,
max_best_of: usize,
max_stop_sequences: usize,
max_top_n_tokens: u32,
max_input_tokens: usize,
max_total_tokens: usize,
// waiting_served_ratio: f32,
// max_batch_prefill_tokens: u32,
// max_batch_total_tokens: Option<u32>,
// max_waiting_tokens: usize,
// max_batch_size: Option<usize>,
revision: Option<String>,
validation_workers: usize,
disable_grammar_support: bool,
max_client_batch_size: usize,
usage_stats_level: UsageStatsLevel,
backend_name: &'static str,
}
impl Args {
#[allow(clippy::too_many_arguments)]
pub fn new(
model_config: Option<Config>,
tokenizer_class: Option<String>,
max_concurrent_requests: usize,
max_best_of: usize,
max_stop_sequences: usize,
max_top_n_tokens: u32,
max_input_tokens: usize,
max_total_tokens: usize,
// waiting_served_ratio: f32,
// max_batch_prefill_tokens: u32,
// max_batch_total_tokens: Option<u32>,
// max_waiting_tokens: usize,
// max_batch_size: Option<usize>,
revision: Option<String>,
validation_workers: usize,
disable_grammar_support: bool,
max_client_batch_size: usize,
usage_stats_level: UsageStatsLevel,
backend_name: &'static str,
) -> Self {
Self {
model_config,
tokenizer_class,
max_concurrent_requests,
max_best_of,
max_stop_sequences,
max_top_n_tokens,
max_input_tokens,
max_total_tokens,
// waiting_served_ratio,
// max_batch_prefill_tokens,
// max_batch_total_tokens,
// max_waiting_tokens,
// max_batch_size,
revision,
validation_workers,
disable_grammar_support,
max_client_batch_size,
usage_stats_level,
backend_name,
}
}
}
/// This is more or less a copy of the code from the `text-generation-launcher` crate to avoid a dependency
#[derive(Serialize, Debug, Clone)]
pub struct Env {
git_sha: &'static str,
docker_label: &'static str,
nvidia_info: Option<Vec<NvidiaSmiInfo>>,
xpu_info: Option<Vec<XpuSmiInfo>>,
system_env: SystemInfo,
}
#[derive(Debug, Serialize, Clone)]
struct NvidiaSmiInfo {
name: String,
pci_bus_id: String,
driver_version: String,
pstate: String,
pcie_link_gen_max: String,
pcie_link_gen_current: String,
temperature_gpu: String,
utilization_gpu: String,
utilization_memory: String,
memory_total: String,
memory_free: String,
memory_used: String,
reset_status_reset_required: String,
reset_status_drain_and_reset_recommended: String,
compute_cap: String,
ecc_errors_corrected_volatile_total: String,
mig_mode_current: String,
power_draw_instant: String,
power_limit: String,
}
impl NvidiaSmiInfo {
fn new() -> Option<Vec<NvidiaSmiInfo>> {
let output = Command::new("nvidia-smi")
.args([
"--query-gpu=name,pci.bus_id,driver_version,pstate,pcie.link.gen.max,pcie.link.gen.gpucurrent,temperature.gpu,utilization.gpu,utilization.memory,memory.total,memory.free,memory.used,reset_status.reset_required,reset_status.drain_and_reset_recommended,compute_cap,ecc.errors.corrected.volatile.total,mig.mode.current,power.draw.instant,power.limit",
"--format=csv"
])
.output()
.ok()?;
if !output.status.success() {
return None;
}
let stdout = String::from_utf8(output.stdout).ok()?;
let mut rdr = ReaderBuilder::new()
.has_headers(true)
.from_reader(stdout.as_bytes());
let mut infos = Vec::new();
for result in rdr.records() {
let record = result.ok()?;
infos.push(NvidiaSmiInfo {
name: record[0].to_string(),
pci_bus_id: record[1].to_string(),
driver_version: record[2].to_string(),
pstate: record[3].to_string(),
pcie_link_gen_max: record[4].to_string(),
pcie_link_gen_current: record[5].to_string(),
temperature_gpu: record[6].to_string(),
utilization_gpu: record[7].to_string(),
utilization_memory: record[8].to_string(),
memory_total: record[9].to_string(),
memory_free: record[10].to_string(),
memory_used: record[11].to_string(),
reset_status_reset_required: record[12].to_string(),
reset_status_drain_and_reset_recommended: record[13].to_string(),
compute_cap: record[14].to_string(),
ecc_errors_corrected_volatile_total: record[15].to_string(),
mig_mode_current: record[16].to_string(),
power_draw_instant: record[17].to_string(),
power_limit: record[18].to_string(),
});
}
Some(infos)
}
}
#[derive(Debug, Serialize, Clone)]
struct XpuSmiInfo {
device_id: usize,
gpu_utilization: f32,
gpu_power: f32,
gpu_core_temperature: f32,
gpu_memory_bandwidth_utilization: f32,
}
impl XpuSmiInfo {
/// based on this https://github.com/intel/xpumanager/blob/master/doc/smi_user_guide.md#dump-the-device-statistics-in-csv-format
fn new() -> Option<Vec<XpuSmiInfo>> {
let output = Command::new("xpu-smi")
.args([
"dump", "-d", "-1", "-m",
"0,1,3,17", // Metrics IDs: GPU Utilization, GPU Power, GPU Core Temperature, GPU Memory Bandwidth Utilization
"-n", "1", "-j",
])
.output()
.ok()?;
if !output.status.success() {
return None;
}
let stdout = String::from_utf8(output.stdout).ok()?;
let mut infos = Vec::new();
let json_data: serde_json::Value = match serde_json::from_str(&stdout) {
Ok(data) => data,
Err(_) => return None,
};
if let Some(metrics_data) = json_data.as_array() {
for entry in metrics_data {
let device_id = entry["deviceId"].as_u64()? as usize;
let gpu_utilization = entry["metrics"][0].as_f64()? as f32;
let gpu_power = entry["metrics"][1].as_f64()? as f32;
let gpu_core_temperature = entry["metrics"][2].as_f64()? as f32;
let gpu_memory_bandwidth_utilization = entry["metrics"][3].as_f64()? as f32;
infos.push(XpuSmiInfo {
device_id,
gpu_utilization,
gpu_power,
gpu_core_temperature,
gpu_memory_bandwidth_utilization,
});
}
}
Some(infos)
}
}
#[derive(Serialize, Debug, Clone)]
pub struct SystemInfo {
cpu_count: usize,
cpu_type: String,
total_memory: u64,
architecture: String,
platform: String,
}
impl SystemInfo {
fn new() -> Self {
let mut system = sysinfo::System::new_all();
system.refresh_all();
let cpu_count = system.cpus().len();
let cpu_type = system.cpus()[0].brand().to_string();
let total_memory = system.total_memory();
let architecture = std::env::consts::ARCH.to_string();
let platform = format!(
"{}-{}-{}",
std::env::consts::OS,
std::env::consts::FAMILY,
std::env::consts::ARCH
);
Self {
cpu_count,
cpu_type,
total_memory,
architecture,
platform,
}
}
}
impl Default for Env {
fn default() -> Self {
Self::new()
}
}
impl Env {
pub fn new() -> Self {
Self {
system_env: SystemInfo::new(),
nvidia_info: NvidiaSmiInfo::new(),
xpu_info: XpuSmiInfo::new(),
git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("N/A"),
docker_label: option_env!("DOCKER_LABEL").unwrap_or("N/A"),
}
}
}
pub fn is_container() -> io::Result<bool> {
let path = Path::new("/proc/self/cgroup");
let file = File::open(path)?;
let reader = io::BufReader::new(file);
for line in reader.lines() {
let line = line?;
// Check for common container runtimes
if line.contains("/docker/")
|| line.contains("/docker-")
|| line.contains("/kubepods/")
|| line.contains("/kubepods-")
|| line.contains("containerd")
|| line.contains("crio")
|| line.contains("podman")
{
return Ok(true);
}
}
Ok(false)
}
| text-generation-inference/router/src/usage_stats.rs/0 | {
"file_path": "text-generation-inference/router/src/usage_stats.rs",
"repo_id": "text-generation-inference",
"token_count": 5315
} |
# Text Generation Inference Python gRPC Server
A Python gRPC server for Text Generation Inference
## Install
```shell
make install
```
## Run
```shell
make run-dev
```
| text-generation-inference/server/README.md/0 | {
"file_path": "text-generation-inference/server/README.md",
"repo_id": "text-generation-inference",
"token_count": 56
} |
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#ifndef _matrix_cuh
#define _matrix_cuh
#include <cuda_runtime.h>
#include <cuda_fp16.h>
class MatrixView_half
{
public:
const half* data;
const int height;
const int width;
__device__ __forceinline__ MatrixView_half(const half* data, const int height, const int width)
: data(data), height(height), width(width)
{ }
__device__ __forceinline__ half item(int row, int column) const { return data[row * width + column]; }
__device__ __forceinline__ half2 item_half2(int row, int column) const { return ((half2*)data)[(row * width + column) / 2]; }
__device__ __forceinline__ half2 item_half2half2(int row, int column) const { return __half2half2(data[row * width + column]); }
__device__ __forceinline__ const half* item_ptr(int row, int column) const { return &data[row * width + column]; }
};
class MatrixView_half_rw
{
public:
half* data;
const int height;
const int width;
__device__ __forceinline__ MatrixView_half_rw(half* data, const int height, const int width)
: data(data), height(height), width(width)
{ }
__device__ __forceinline__ half item(int row, int column) const { return data[row * width + column]; }
__device__ __forceinline__ half2 item_half2(int row, int column) const { return ((half2*)data)[(row * width + column) / 2]; }
__device__ __forceinline__ half* item_ptr(int row, int column) { return &data[row * width + column]; }
__device__ __forceinline__ void set(int row, int column, half value) { data[row * width + column] = value; }
__device__ __forceinline__ void set_half2(int row, int column, half2 value) { ((half2*)data)[(row * width + column) / 2] = value; }
};
class MatrixView_q4_row
{
public:
const uint32_t* data;
const int height;
const int width;
__device__ __forceinline__ MatrixView_q4_row(const uint32_t* data, const int height, const int width)
: data(data), height(height), width(width)
{ }
__device__ __forceinline__ int item(int row, int column) const
{
int shift = (column & 0x07) * 4;
return (data[row * width / 8 + column / 8] >> shift) & 0x0f;
}
};
class MatrixView_q4_column
{
public:
const uint32_t* data;
const int height;
const int width;
__device__ __forceinline__ MatrixView_q4_column(const uint32_t* data, const int height, const int width)
: data(data), height(height), width(width)
{ }
__device__ __forceinline__ int item(int row, int column) const
{
int shift = (row & 0x07) * 4;
return (data[row / 8 * width + column] >> shift) & 0x0f;
}
__device__ __forceinline__ uint32_t item_uint32_t(int row, int column) { return data[row / 8 * width + column]; }
__device__ __forceinline__ const uint32_t* item_uint32_ptr(int row, int column) { return &data[row / 8 * width + column]; }
};
// TODO: Rewrite all these dot product functions using functors or something, move to q4_matmul.cu
// Accumulated dot product of 8-element row vectors in h and quantized column vectors in v, constant zero/scale
__device__ __forceinline__ half2 dot_product_8
(
const half2 acc,
MatrixView_half& h_,
const int h_row,
const int h_column, // divisible by 8
MatrixView_q4_column& v_,
const int v_row, // divisible by 8
const int v_column,
const half2 v_scale_2,
const uint32_t v_zero, // + 1 (!!)
const int count
)
{
const half2* h_ptr = (const half2*) h_.item_ptr(h_row, h_column);
const uint32_t* v_ptr = (const uint32_t*) v_.item_uint32_ptr(v_row, v_column);
half2 result = acc;
for (int i = 0; i < count; i++)
{
uint32_t v_read = *v_ptr; v_ptr += v_.width;
half v_0 = __int2half_rn((int)((v_read ) & 0x0f) - v_zero);
half v_1 = __int2half_rn((int)((v_read >> 4) & 0x0f) - v_zero);
half v_2 = __int2half_rn((int)((v_read >> 8) & 0x0f) - v_zero);
half v_3 = __int2half_rn((int)((v_read >> 12) & 0x0f) - v_zero);
half v_4 = __int2half_rn((int)((v_read >> 16) & 0x0f) - v_zero);
half v_5 = __int2half_rn((int)((v_read >> 20) & 0x0f) - v_zero);
half v_6 = __int2half_rn((int)((v_read >> 24) & 0x0f) - v_zero);
half v_7 = __int2half_rn((int)((v_read >> 28) ) - v_zero);
half2 v_01 = __halves2half2(v_0, v_1);
half2 v_23 = __halves2half2(v_2, v_3);
half2 v_45 = __halves2half2(v_4, v_5);
half2 v_67 = __halves2half2(v_6, v_7);
// half2 v_01 = q4_table[v_zero - 1][(v_read ) & 0xff]; // (constant memory is too slow apparently)
// half2 v_23 = q4_table[v_zero - 1][(v_read >> 8) & 0xff];
// half2 v_45 = q4_table[v_zero - 1][(v_read >> 16) & 0xff];
// half2 v_67 = q4_table[v_zero - 1][(v_read >> 24) ];
half2 tmp = __hmul2(*h_ptr++, v_01);
tmp = __hfma2(*h_ptr++, v_23, tmp);
tmp = __hfma2(*h_ptr++, v_45, tmp);
tmp = __hfma2(*h_ptr++, v_67, tmp);
result = __hfma2(v_scale_2, tmp, result);
}
return result;
}
__device__ __forceinline__ half dot_product_8_h
(
const half acc,
MatrixView_half& h_,
const int h_row,
const int h_column, // divisible by 8
MatrixView_q4_column& v_,
const int v_row, // divisible by 8
const int v_column,
const half v_scale,
const uint32_t v_zero, // + 1 (!!)
const int count
)
{
const half* h_ptr = h_.item_ptr(h_row, h_column);
const uint32_t* v_ptr = (const uint32_t*) v_.item_uint32_ptr(v_row, v_column);
half result = acc;
for (int i = 0; i < count; i++)
{
uint32_t v_read = *v_ptr; v_ptr += v_.width;
half v_0 = __int2half_rn((int)((v_read ) & 0x0f) - v_zero);
half v_1 = __int2half_rn((int)((v_read >> 4) & 0x0f) - v_zero);
half v_2 = __int2half_rn((int)((v_read >> 8) & 0x0f) - v_zero);
half v_3 = __int2half_rn((int)((v_read >> 12) & 0x0f) - v_zero);
half v_4 = __int2half_rn((int)((v_read >> 16) & 0x0f) - v_zero);
half v_5 = __int2half_rn((int)((v_read >> 20) & 0x0f) - v_zero);
half v_6 = __int2half_rn((int)((v_read >> 24) & 0x0f) - v_zero);
half v_7 = __int2half_rn((int)((v_read >> 28) ) - v_zero);
half tmp = __hmul(*h_ptr++, v_0);
tmp = __hfma(*h_ptr++, v_1, tmp);
tmp = __hfma(*h_ptr++, v_2, tmp);
tmp = __hfma(*h_ptr++, v_3, tmp);
tmp = __hfma(*h_ptr++, v_4, tmp);
tmp = __hfma(*h_ptr++, v_5, tmp);
tmp = __hfma(*h_ptr++, v_6, tmp);
tmp = __hfma(*h_ptr++, v_7, tmp);
result = __hfma(v_scale, tmp, result);
}
return result;
}
// Accumulated dot product of 8-element row vectors in h and quantized column vectors in v, constant zero/scale, with x_map
__device__ __forceinline__ half2 dot_product_8_x_map
(
const half2 acc,
MatrixView_half& h_,
const int h_row,
const int h_column, // divisible by 8
MatrixView_q4_column& v_,
const int v_row, // divisible by 8
const int v_column,
const half2 v_scale_2,
const uint32_t v_zero, // + 1 (!!)
const int count,
const uint32_t* x_map
)
{
const half* h_ptr = h_.item_ptr(h_row, 0);
const uint32_t* x_map_ptr = x_map + h_column;
const uint32_t* v_ptr = (const uint32_t*) v_.item_uint32_ptr(v_row, v_column);
half2 result = acc;
for (int i = 0; i < count; i++)
{
uint32_t v_read = *v_ptr; v_ptr += v_.width;
half v_0 = __int2half_rn((int)((v_read ) & 0x0f) - v_zero);
half v_1 = __int2half_rn((int)((v_read >> 4) & 0x0f) - v_zero);
half v_2 = __int2half_rn((int)((v_read >> 8) & 0x0f) - v_zero);
half v_3 = __int2half_rn((int)((v_read >> 12) & 0x0f) - v_zero);
half v_4 = __int2half_rn((int)((v_read >> 16) & 0x0f) - v_zero);
half v_5 = __int2half_rn((int)((v_read >> 20) & 0x0f) - v_zero);
half v_6 = __int2half_rn((int)((v_read >> 24) & 0x0f) - v_zero);
half v_7 = __int2half_rn((int)((v_read >> 28) ) - v_zero);
half2 v_01 = __halves2half2(v_0, v_1);
half2 v_23 = __halves2half2(v_2, v_3);
half2 v_45 = __halves2half2(v_4, v_5);
half2 v_67 = __halves2half2(v_6, v_7);
half h_0 = h_ptr[*x_map_ptr++];
half h_1 = h_ptr[*x_map_ptr++];
half h_2 = h_ptr[*x_map_ptr++];
half h_3 = h_ptr[*x_map_ptr++];
half h_4 = h_ptr[*x_map_ptr++];
half h_5 = h_ptr[*x_map_ptr++];
half h_6 = h_ptr[*x_map_ptr++];
half h_7 = h_ptr[*x_map_ptr++];
half2 h_01 = __halves2half2(h_0, h_1);
half2 h_23 = __halves2half2(h_2, h_3);
half2 h_45 = __halves2half2(h_4, h_5);
half2 h_67 = __halves2half2(h_6, h_7);
half2 tmp = __hmul2(h_01, v_01);
tmp = __hfma2(h_23, v_23, tmp);
tmp = __hfma2(h_45, v_45, tmp);
tmp = __hfma2(h_67, v_67, tmp);
result = __hfma2(v_scale_2, tmp, result);
}
return result;
}
__device__ __forceinline__ half dot_product_8_x_map_h
(
const half acc,
MatrixView_half& h_,
const int h_row,
const int h_column, // divisible by 8
MatrixView_q4_column& v_,
const int v_row, // divisible by 8
const int v_column,
const half v_scale,
const uint32_t v_zero, // + 1 (!!)
const int count,
const uint32_t* x_map
)
{
const half* h_ptr = h_.item_ptr(h_row, 0);
const uint32_t* x_map_ptr = x_map + h_column;
const uint32_t* v_ptr = (const uint32_t*) v_.item_uint32_ptr(v_row, v_column);
half result = acc;
for (int i = 0; i < count; i++)
{
uint32_t v_read = *v_ptr; v_ptr += v_.width;
half v_0 = __int2half_rn((int)((v_read ) & 0x0f) - v_zero);
half v_1 = __int2half_rn((int)((v_read >> 4) & 0x0f) - v_zero);
half v_2 = __int2half_rn((int)((v_read >> 8) & 0x0f) - v_zero);
half v_3 = __int2half_rn((int)((v_read >> 12) & 0x0f) - v_zero);
half v_4 = __int2half_rn((int)((v_read >> 16) & 0x0f) - v_zero);
half v_5 = __int2half_rn((int)((v_read >> 20) & 0x0f) - v_zero);
half v_6 = __int2half_rn((int)((v_read >> 24) & 0x0f) - v_zero);
half v_7 = __int2half_rn((int)((v_read >> 28) ) - v_zero);
half tmp = __hmul(h_ptr[*x_map_ptr++], v_0);
tmp = __hfma(h_ptr[*x_map_ptr++], v_1, tmp);
tmp = __hfma(h_ptr[*x_map_ptr++], v_2, tmp);
tmp = __hfma(h_ptr[*x_map_ptr++], v_3, tmp);
tmp = __hfma(h_ptr[*x_map_ptr++], v_4, tmp);
tmp = __hfma(h_ptr[*x_map_ptr++], v_5, tmp);
tmp = __hfma(h_ptr[*x_map_ptr++], v_6, tmp);
tmp = __hfma(h_ptr[*x_map_ptr++], v_7, tmp);
result = __hfma(v_scale, tmp, result);
}
return result;
}
#endif
| text-generation-inference/server/exllama_kernels/exllama_kernels/matrix.cuh/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/matrix.cuh",
"repo_id": "text-generation-inference",
"token_count": 5380
} |
#ifndef _qdq_4_cuh
#define _qdq_4_cuh
#include "qdq_util.cuh"
#include "../../config.h"
#if QMODE_4BIT == 1
// Permutation:
//
// 77775555 33331111 66664444 22220000
__forceinline__ __device__ void shuffle_4bit_8
(
uint32_t* q,
int stride
)
{
uint32_t qa = q[0];
uint32_t qb = 0;
#pragma unroll
for (int i = 0; i < 4; i++)
{
uint32_t qa0 = qa & 0x0f;
uint32_t qa1 = (qa & 0xf0) >> 4;
qa >>= 8;
qb |= (qa1 << (i * 4 + 16));
qb |= (qa0 << (i * 4));
}
q[0] = qb;
}
__forceinline__ __device__ void dequant_4bit_8
(
const uint32_t q_0,
half2 (&dq)[4],
int stride
)
{
const uint32_t c0 = 0x64006400;
const half y16_ = __float2half_rn(1.0f / 16.0f);
const half2 y16 = __halves2half2(y16_, y16_);
const half z1_ = __float2half_rn(-1024.0f - 8.0f);
const half z16_ = __float2half_rn(-1024.0f / 16.0f - 8.0f);
const half2 z1 = __halves2half2(z1_, z1_);
const half2 z16 = __halves2half2(z16_, z16_);
uint32_t qa = q_0;
half2_uint32 q0((qa & 0x000f000f) | c0); // half2(q[ 0], q[ 1]) + 1024
half2_uint32 q1((qa & 0x00f000f0) | c0); // half2(q[ 2], q[ 3]) * 16 + 1024
qa >>= 8;
half2_uint32 q2((qa & 0x000f000f) | c0); // half2(q[ 4], q[ 5]) + 1024
half2_uint32 q3((qa & 0x00f000f0) | c0); // half2(q[ 6], q[ 7]) * 16 + 1024
dq[0] = __hadd2(q0.as_half2, z1);
dq[1] = __hfma2(q1.as_half2, y16, z16);
dq[2] = __hadd2(q2.as_half2, z1);
dq[3] = __hfma2(q3.as_half2, y16, z16);
}
__forceinline__ __device__ void dequant_4bit_8_prep_zero_scale
(
const uint32_t zero,
const half scale,
half2 (&z1z16)[2],
half2 (&y1y16)[2]
)
{
half_uint16 z1(0xe400 | zero); // half(-1024.0f - zero);
half z16 = __hsub(__int2half_rn(-64), __int2half_rn(zero));
half2 scale2 = __half2half2(scale);
z1z16[0] = __hmul2(scale2, __half2half2(z1.as_half));
z1z16[1] = __hmul2(scale2, __half2half2(z16));
const half y1 = __float2half_rn(1.0f);
const half y16 = __float2half_rn(1.0f / 16.0f);
y1y16[0] = __hmul2(scale2, __half2half2(y1));
y1y16[1] = __hmul2(scale2, __half2half2(y16));
}
__forceinline__ __device__ void dequant_4bit_8_prep_zero
(
const uint32_t zero,
half2(&z1z16)[2],
half2(&y1y16)[2]
)
{
half_uint16 z1(0xe400 | zero); // half(-1024.0f - zero);
half z16 = __hsub(__int2half_rn(-64), __int2half_rn(zero));
z1z16[0] = __half2half2(z1.as_half);
z1z16[1] = __half2half2(z16);
const half y1 = __float2half_rn(1.0f);
const half y16 = __float2half_rn(1.0f / 16.0f);
y1y16[0] = __half2half2(y1);
y1y16[1] = __half2half2(y16);
}
__forceinline__ __device__ void dequant_4bit_8_gptq
(
const uint32_t q_0,
half2 (&dq)[4],
half2 (&z1z16)[2],
half2 (&y1y16)[2],
int stride,
bool scaled
)
{
const uint32_t c0 = 0x64006400;
uint32_t qa = q_0;
half2_uint32 q0((qa & 0x000f000f) | c0); // half2( q[0] + 1024, q[1] + 1024 )
half2_uint32 q1((qa & 0x00f000f0) | c0); // half2( q[2] * 16 + 1024, q[3] * 16 + 1024 )
qa >>= 8;
half2_uint32 q2((qa & 0x000f000f) | c0); // half2( q[4] + 1024, q[5] + 1024 )
half2_uint32 q3((qa & 0x00f000f0) | c0); // half2( q[6] * 16 + 1024, q[7] * 16 + 1024 )
if (scaled)
{
dq[0] = __hfma2(q0.as_half2, y1y16[0], z1z16[0]); // half2( q[0] * s - z * s, q[1] * s - z * s)
dq[1] = __hfma2(q1.as_half2, y1y16[1], z1z16[1]); // half2( q[2] * s - z * s, q[3] * s - z * s)
dq[2] = __hfma2(q2.as_half2, y1y16[0], z1z16[0]);
dq[3] = __hfma2(q3.as_half2, y1y16[1], z1z16[1]);
}
else
{
dq[0] = __hadd2(q0.as_half2, z1z16[0]); // half2( q[0] - z, q[1] - z )
dq[1] = __hfma2(q1.as_half2, y1y16[1], z1z16[1]); // half2( q[2] - z, q[3] - z )
dq[2] = __hadd2(q2.as_half2, z1z16[0]); // half2( q[4] - z, q[5] - z )
dq[3] = __hfma2(q3.as_half2, y1y16[1], z1z16[1]); // half2( q[6] - z, q[7] - z )
}
}
#else
__forceinline__ __device__ void shuffle_4bit_8
(
uint32_t* q,
int stride
)
{
}
__forceinline__ __device__ void dequant_4bit_8
(
const uint32_t q_0,
half2 (&dq)[4],
int stride
)
{
half dqh[8];
for (int i = 0; i < 8; i++) dqh[i] = dq_ns(exb(q_0, i * 4, 0x0f), 8);
for (int i = 0; i < 4; i++) dq[i] = __halves2half2(dqh[i * 2], dqh[i * 2 + 1]);
}
__forceinline__ __device__ void dequant_4bit_8_prep_zero_scale
(
const uint32_t zero,
const half scale,
half2 (&z1)[2],
half2 (&y1)[2]
)
{
half z = __int2half_rn(-((int)zero));
z = __hmul(z, scale);
z1[0] = __half2half2(z);
y1[0] = __half2half2(scale);
}
__forceinline__ __device__ void dequant_4bit_8_prep_zero
(
const uint32_t zero,
half2(&z1)[2],
half2(&y1)[2]
)
{
half z = __int2half_rn(-((int)zero));
z1[0] = __half2half2(z);
}
__forceinline__ __device__ void dequant_4bit_8_gptq
(
const uint32_t q_0,
half2 (&dq)[4],
half2 (&z1)[2],
half2 (&y1)[2],
int stride,
bool scaled
)
{
half2 dqh2[8];
uint32_t qa = q_0;
for (int i = 0; i < 4; i++)
{
half d0 = __int2half_rn(qa & 0x0f); qa >>= 4;
half d1 = __int2half_rn(qa & 0x0f); qa >>= 4;
dqh2[i] = __halves2half2(d0, d1);
}
if (scaled)
{
dq[0] = __hfma2(dqh2[0], y1[0], z1[0]);
dq[1] = __hfma2(dqh2[1], y1[0], z1[0]);
dq[2] = __hfma2(dqh2[2], y1[0], z1[0]);
dq[3] = __hfma2(dqh2[3], y1[0], z1[0]);
}
else
{
dq[0] = __hadd2(dqh2[0], z1[0]);
dq[1] = __hadd2(dqh2[1], z1[0]);
dq[2] = __hadd2(dqh2[2], z1[0]);
dq[3] = __hadd2(dqh2[3], z1[0]);
}
}
#endif
#endif
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_4.cuh/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_4.cuh",
"repo_id": "text-generation-inference",
"token_count": 3279
} |
import pytest
import torch
from copy import copy
from transformers import AutoTokenizer
from text_generation_server.pb import generate_pb2
from text_generation_server.models.causal_lm import CausalLM, CausalLMBatch
@pytest.fixture(scope="session")
def default_causal_lm():
return CausalLM.fallback("gpt2")
@pytest.fixture(scope="session")
def gpt2_tokenizer():
tokenizer = AutoTokenizer.from_pretrained("gpt2", padding_side="left")
tokenizer.pad_token_id = 50256
return tokenizer
@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
return generate_pb2.Request(
id=0,
inputs="Test",
input_chunks=generate_pb2.Input(chunks=[generate_pb2.InputChunk(text="Test")]),
prefill_logprobs=True,
truncate=100,
parameters=default_pb_parameters,
stopping_parameters=default_pb_stop_parameters,
)
@pytest.fixture
def default_pb_batch(default_pb_request):
return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)
@pytest.fixture
def default_causal_lm_batch(default_pb_batch, gpt2_tokenizer):
return CausalLMBatch.from_pb(
default_pb_batch, gpt2_tokenizer, torch.float32, torch.device("cpu")
)
@pytest.fixture
def default_multi_requests_causal_lm_batch(default_pb_request, gpt2_tokenizer):
req_0 = copy(default_pb_request)
req_0.id = 1
req_1 = default_pb_request
req_1.id = 2
req_1.stopping_parameters.max_new_tokens = 5
batch_pb = generate_pb2.Batch(id=1, requests=[req_0, req_1], size=2)
return CausalLMBatch.from_pb(
batch_pb, gpt2_tokenizer, torch.float32, torch.device("cpu")
)
def test_batch_from_pb(default_pb_batch, default_causal_lm_batch):
batch = default_causal_lm_batch
assert batch.batch_id == default_pb_batch.id
assert batch.requests == default_pb_batch.requests
assert len(batch.input_ids) == default_pb_batch.size
assert batch.input_ids[0][-1] == 14402
assert torch.all(batch.input_ids[0][:-1] == 50256)
assert batch.attention_mask[0, 0] == 1
assert torch.all(batch.attention_mask[0, 1:] == 0)
assert batch.past_key_values is None
assert all(
[
torch.equal(input_ids, all_input_ids[:, 0])
for input_ids, all_input_ids in zip(batch.input_ids, batch.all_input_ids)
]
)
assert batch.input_lengths == [1]
assert len(batch) == default_pb_batch.size
assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)
assert batch.max_input_length == batch.input_lengths[0]
def test_batch_concatenate_no_prefill(default_causal_lm_batch):
with pytest.raises(ValueError):
CausalLMBatch.concatenate([default_causal_lm_batch, default_causal_lm_batch])
def test_causal_lm_batch_type(default_causal_lm):
assert default_causal_lm.batch_type == CausalLMBatch
def test_causal_lm_generate_token(default_causal_lm, default_causal_lm_batch):
sequence_length = len(default_causal_lm_batch.all_input_ids[0])
generations, next_batch, _ = default_causal_lm.generate_token(
default_causal_lm_batch
)
assert len(generations) == len(next_batch)
assert isinstance(next_batch, CausalLMBatch)
assert len(next_batch.all_input_ids) == len(next_batch)
assert len(next_batch.all_input_ids[0]) == sequence_length + 1
assert len(next_batch.attention_mask[0]) == 11
assert next_batch.all_input_ids[0][-1] == 13
assert next_batch.all_input_ids[0][-2] == 14402
assert torch.all(next_batch.all_input_ids[0][:-2] == 50256)
assert torch.all(next_batch.attention_mask[0][0:2] == 1)
assert torch.all(next_batch.attention_mask[0][2:] == 0)
assert next_batch.input_ids.shape == (len(next_batch), 1)
assert next_batch.input_ids[0, 0] == 13
assert next_batch.input_lengths == [2]
assert next_batch.max_input_length == next_batch.input_lengths[0]
assert next_batch.past_key_values is not None
assert all(
[p[0].shape == (1, 12, sequence_length, 64) for p in next_batch.past_key_values]
)
assert all(
[p[1].shape == (1, 12, sequence_length, 64) for p in next_batch.past_key_values]
)
assert all([generation.generated_text is None for generation in generations])
assert all([len(generation.prefill_tokens) == 1 for generation in generations])
assert all(
[
token_id.item() == 13
for generation in generations
for token_id in generation.tokens.token_ids
]
)
assert all(
[
token_text == "."
for generation in generations
for token_text in generation.tokens.texts
]
)
assert generations[0].request_id == 0
def test_causal_lm_generate_token_completion(
default_causal_lm, default_causal_lm_batch
):
next_batch = default_causal_lm_batch
for _ in range(default_causal_lm_batch.stopping_criterias[0].max_new_tokens - 1):
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert generations[0].generated_text.text == ".java:784) at net.minecraft."
assert generations[0].request_id == default_causal_lm_batch.requests[0].id
assert (
generations[0].generated_text.generated_tokens
== default_causal_lm_batch.stopping_criterias[0].max_new_tokens
)
def test_causal_lm_generate_token_completion_multi(
default_causal_lm, default_multi_requests_causal_lm_batch
):
next_batch = default_multi_requests_causal_lm_batch
for i in range(
default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 1
):
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 2
assert generations[1].generated_text.text == ".java:784)"
assert (
generations[1].request_id
== default_multi_requests_causal_lm_batch.requests[1].id
)
assert (
generations[1].generated_text.generated_tokens
== default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
)
# Copy stopping_criterias before filtering
stopping_criterias = (
default_multi_requests_causal_lm_batch.stopping_criterias.copy()
)
next_batch = next_batch.filter([next_batch.requests[0].id])
for _ in range(
stopping_criterias[0].max_new_tokens - stopping_criterias[1].max_new_tokens - 1
):
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert generations[0].generated_text.text == ".java:784) at net.minecraft."
assert (
generations[0].request_id
== default_multi_requests_causal_lm_batch.requests[0].id
)
assert (
generations[0].generated_text.generated_tokens
== default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
)
def test_batch_concatenate(
default_causal_lm, default_causal_lm_batch, default_multi_requests_causal_lm_batch
):
next_batch_0 = default_causal_lm_batch
_, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0)
_, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0)
next_batch_1 = default_multi_requests_causal_lm_batch
_, next_batch_1, _ = default_causal_lm.generate_token(next_batch_1)
# Clone past_key_values before concatenating to compare after,
# because they are removed from the concatenated batches
next_batch_0_past_key_values = [
(k.clone(), v.clone()) for (k, v) in next_batch_0.past_key_values
]
next_batch_1_past_key_values = [
(k.clone(), v.clone()) for (k, v) in next_batch_1.past_key_values
]
next_batch = CausalLMBatch.concatenate([next_batch_0, next_batch_1])
assert torch.equal(next_batch.all_input_ids[0], next_batch_0.all_input_ids[0])
assert torch.equal(next_batch.all_input_ids[1], next_batch_1.all_input_ids[0])
assert torch.equal(next_batch.all_input_ids[2], next_batch_1.all_input_ids[1])
assert torch.all(
next_batch.attention_mask[0, : -next_batch.padding_right_offset] == 1
)
assert torch.all(
next_batch.attention_mask[1:, 1 : -next_batch.padding_right_offset] == 1
)
assert torch.all(next_batch.attention_mask[1:, 3:] == 0)
assert next_batch.batch_id == 0
assert next_batch.input_ids[0, 0] == 12355
assert torch.all(next_batch.input_ids[1:] == 13)
assert next_batch.input_lengths == [3, 2, 2]
assert next_batch.max_input_length == 3
assert next_batch.requests[0] == next_batch_0.requests[0]
assert next_batch.requests[1:] == list(next_batch_1.requests)
assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers
assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias
assert next_batch.past_key_values is not None
assert all([p[0].shape == (3, 12, 2, 64) for p in next_batch.past_key_values])
assert all([p[1].shape == (3, 12, 2, 64) for p in next_batch.past_key_values])
for i, past in enumerate(next_batch.past_key_values):
assert torch.equal(next_batch_0_past_key_values[i][0][0, :, -2:], past[0][0])
assert torch.equal(
next_batch_1_past_key_values[i][0][:, :, -1:], past[0][1:, :, -1:, :]
)
assert torch.equal(next_batch_0_past_key_values[i][1][0, :, -2:], past[1][0])
assert torch.equal(
next_batch_1_past_key_values[i][1][:, :, -1:], past[1][1:, :, -1:, :]
)
for _ in range(
default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 2
):
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 3
assert generations[2].generated_text.text == ".java:784)"
assert (
generations[2].request_id
== default_multi_requests_causal_lm_batch.requests[1].id
)
assert (
generations[2].generated_text.generated_tokens
== default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
)
next_batch = next_batch.filter(
[next_batch.requests[0].id, next_batch.requests[1].id]
)
for _ in range(
default_causal_lm_batch.stopping_criterias[0].max_new_tokens
- default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
- 2
):
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert next_batch is not None
assert len(generations) == 2
assert generations[0].generated_text.text == ".java:784) at net.minecraft."
assert generations[0].request_id == default_causal_lm_batch.requests[0].id
assert (
generations[0].generated_text.generated_tokens
== default_causal_lm_batch.stopping_criterias[0].max_new_tokens
)
next_batch = next_batch.filter([next_batch.requests[1].id])
for _ in range(
default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
- default_causal_lm_batch.stopping_criterias[0].max_new_tokens
- default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
- 4
):
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert len(generations) == len(next_batch)
generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
assert next_batch is None
assert len(generations) == 1
assert generations[0].generated_text.text == ".java:784) at net.minecraft."
assert (
generations[0].request_id
== default_multi_requests_causal_lm_batch.requests[0].id
)
assert (
generations[0].generated_text.generated_tokens
== default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
)
| text-generation-inference/server/tests/models/test_causal_lm.py/0 | {
"file_path": "text-generation-inference/server/tests/models/test_causal_lm.py",
"repo_id": "text-generation-inference",
"token_count": 5390
} |
import torch
from typing import Dict, Optional, TypeVar
from text_generation_server.models.types import Batch
B = TypeVar("B", bound=Batch)
class Cache:
def __init__(self):
self.cache: Dict[int, B] = {}
def pop(self, batch_id: int) -> Optional[B]:
return self.cache.pop(batch_id, None)
def set(self, entry: B):
if entry is not None:
self.cache[entry.batch_id] = entry
def delete(self, batch_id: int):
batch = self.pop(batch_id)
if batch is not None:
del batch
if torch.cuda.is_available():
torch.cuda.empty_cache()
def clear(self):
keys = list(self.cache.keys())
for k in keys:
self.delete(k)
def __len__(self):
return len(self.cache.keys())
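# --- Illustrative usage sketch (added for clarity; not part of the upstream file) ---
# `_FakeBatch` is a stand-in: the cache only touches the `batch_id` attribute, so any
# object exposing it is enough for a demonstration.
if __name__ == "__main__":
    from dataclasses import dataclass
    @dataclass
    class _FakeBatch:
        batch_id: int
    cache = Cache()
    cache.set(_FakeBatch(batch_id=0))
    cache.set(_FakeBatch(batch_id=1))
    assert len(cache) == 2
    assert cache.pop(0).batch_id == 0  # pop removes and returns the entry
    cache.delete(1)  # drops the entry and empties the CUDA cache when available
    cache.clear()
    assert len(cache) == 0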
| text-generation-inference/server/text_generation_server/cache.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/cache.py",
"repo_id": "text-generation-inference",
"token_count": 359
} |
from dataclasses import dataclass
import bitsandbytes as bnb
import torch
from bitsandbytes.nn import Int8Params, Params4bit
from text_generation_server.utils.weights import UnquantizedWeight
@dataclass
class BNBWeight(UnquantizedWeight):
weight: torch.Tensor
def get_linear(self, bias: torch.Tensor):
return Linear8bitLt(self.weight, bias, has_fp16_weights=False, threshold=6.0)
class Linear8bitLt(torch.nn.Module):
def __init__(
self,
weight,
bias,
has_fp16_weights=True,
memory_efficient_backward=False,
threshold=0.0,
index=None,
):
super().__init__()
assert (
not memory_efficient_backward
), "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0"
self.state = bnb.MatmulLtState()
self.index = index
# Necessary for stacked layers
self.state.threshold = threshold
self.state.has_fp16_weights = has_fp16_weights
self.state.memory_efficient_backward = memory_efficient_backward
if threshold > 0.0 and not has_fp16_weights:
self.state.use_pool = True
self.weight = Int8Params(
weight.data,
has_fp16_weights=has_fp16_weights,
requires_grad=has_fp16_weights,
)
self.weight.cuda(weight.device)
self.bias = bias
def init_8bit_state(self):
self.state.CB = self.weight.CB
self.state.SCB = self.weight.SCB
self.weight.CB = None
self.weight.SCB = None
def forward(self, x: torch.Tensor):
self.state.is_training = self.training
if self.weight.CB is not None:
self.init_8bit_state()
# weights are cast automatically as Int8Params, but the bias has to be cast manually
if self.bias is not None and self.bias.dtype != x.dtype:
self.bias.data = self.bias.data.to(x.dtype)
out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state)
if not self.state.has_fp16_weights:
if self.state.CB is not None and self.state.CxB is not None:
# we converted 8-bit row major to turing/ampere format in the first inference pass
# we no longer need the row-major weight
del self.state.CB
self.weight.data = self.state.CxB
return out
@dataclass
class BNBFP4Weight(UnquantizedWeight):
weight: torch.Tensor
def get_linear(self, bias: torch.Tensor):
return Linear4bit(self.weight, bias, quant_type="fp4")
@dataclass
class BNBNF4Weight(UnquantizedWeight):
weight: torch.Tensor
def get_linear(self, bias: torch.Tensor):
return Linear4bit(self.weight, bias, quant_type="nf4")
class Linear4bit(torch.nn.Module):
def __init__(self, weight, bias, quant_type):
super().__init__()
self.weight = Params4bit(
weight.data,
requires_grad=False,
compress_statistics=True,
quant_type=quant_type,
)
self.compute_dtype = None
self.weight.cuda(weight.device)
self.bias = bias
def forward(self, x: torch.Tensor):
        # weights are cast automatically as Params4bit, but the bias has to be cast manually
if self.bias is not None and self.bias.dtype != x.dtype:
self.bias.data = self.bias.data.to(x.dtype)
if getattr(self.weight, "quant_state", None) is None:
print(
"FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first."
)
inp_dtype = x.dtype
if self.compute_dtype is not None:
x = x.to(self.compute_dtype)
bias = None if self.bias is None else self.bias.to(self.compute_dtype)
out = bnb.matmul_4bit(
x, self.weight.t(), bias=bias, quant_state=self.weight.quant_state
)
out = out.to(inp_dtype)
return out
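# Illustrative sketch (added; the snippet below is an assumption about typical usage,
# not code taken from this file). The dataclasses above wrap an fp16 tensor and build
# a quantized linear layer on demand:
#   layer = BNBWeight(weight).get_linear(bias=None)      # int8 (LLM.int8()) linear
#   layer = BNBNF4Weight(weight).get_linear(bias=None)   # 4-bit NF4 linear
#   y = layer(x)  # x: [batch, in_features] fp16 tensor on the same CUDA device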
| text-generation-inference/server/text_generation_server/layers/bnb.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/bnb.py",
"repo_id": "text-generation-inference",
"token_count": 1825
} |
import time
import torch.nn as nn
import math
import json
import os
import torch
import transformers
from texttable import Texttable
from transformers import AutoModelForCausalLM, AutoConfig, AutoTokenizer
from huggingface_hub import HfApi
from accelerate import init_empty_weights
from text_generation_server.utils import initialize_torch_distributed, Weights
from text_generation_server.utils.hub import weight_files
from text_generation_server.layers.gptq import QuantLinear
from loguru import logger
from typing import Optional
from text_generation_server.layers.gptq.utils import torch_snr_error
from text_generation_server.utils.weights import DefaultWeightsLoader, UnquantizedWeight
DEV = torch.device("cuda:0")
class Quantizer(nn.Module):
def __init__(self, shape=1):
super(Quantizer, self).__init__()
self.register_buffer("maxq", torch.tensor(0))
self.register_buffer("scale", torch.zeros(shape))
self.register_buffer("zero", torch.zeros(shape))
def configure(
self,
bits,
perchannel=False,
sym=True,
mse=False,
norm=2.4,
grid=100,
maxshrink=0.8,
trits=False,
):
self.maxq = torch.tensor(2**bits - 1)
self.perchannel = perchannel
self.sym = sym
self.mse = mse
self.norm = norm
self.grid = grid
self.maxshrink = maxshrink
if trits:
self.maxq = torch.tensor(-1)
self.scale = torch.zeros_like(self.scale)
def _quantize(self, x, scale, zero, maxq):
if maxq < 0:
return (x > scale / 2).float() * scale + (x < zero / 2).float() * zero
q = torch.clamp(torch.round(x / scale) + zero, 0, maxq)
return scale * (q - zero)
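    # Worked example (illustrative): with 4 bits maxq = 15; for scale = 0.5 and zero = 8,
    # x = 1.3 quantizes to q = clamp(round(1.3 / 0.5) + 8, 0, 15) = 11 and dequantizes
    # back to scale * (q - zero) = 0.5 * (11 - 8) = 1.5.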
def find_params(self, x, weight=False):
dev = x.device
self.maxq = self.maxq.to(dev)
shape = x.shape
if self.perchannel:
if weight:
x = x.flatten(1)
else:
if len(shape) == 4:
x = x.permute([1, 0, 2, 3])
x = x.flatten(1)
if len(shape) == 3:
x = x.reshape((-1, shape[-1])).t()
if len(shape) == 2:
x = x.t()
else:
x = x.flatten().unsqueeze(0)
tmp = torch.zeros(x.shape[0], device=dev)
xmin = torch.minimum(x.min(1)[0], tmp)
xmax = torch.maximum(x.max(1)[0], tmp)
if self.sym:
xmax = torch.maximum(torch.abs(xmin), xmax)
tmp = xmin < 0
if torch.any(tmp):
xmin[tmp] = -xmax[tmp]
tmp = (xmin == 0) & (xmax == 0)
xmin[tmp] = -1
xmax[tmp] = +1
if self.maxq < 0:
self.scale = xmax
self.zero = xmin
else:
self.scale = (xmax - xmin) / self.maxq
if self.sym:
self.zero = torch.full_like(self.scale, (self.maxq + 1) / 2)
else:
self.zero = torch.round(-xmin / self.scale)
if self.mse:
best = torch.full([x.shape[0]], float("inf"), device=dev)
for i in range(int(self.maxshrink * self.grid)):
p = 1 - i / self.grid
xmin1 = p * xmin
xmax1 = p * xmax
scale1 = (xmax1 - xmin1) / self.maxq
zero1 = torch.round(-xmin1 / scale1) if not self.sym else self.zero
q = self._quantize(
x, scale1.unsqueeze(1), zero1.unsqueeze(1), self.maxq
)
q -= x
q.abs_()
q.pow_(self.norm)
err = torch.sum(q, 1)
tmp = err < best
if torch.any(tmp):
best[tmp] = err[tmp]
self.scale[tmp] = scale1[tmp]
self.zero[tmp] = zero1[tmp]
if not self.perchannel:
if weight:
tmp = shape[0]
else:
tmp = shape[1] if len(shape) != 3 else shape[2]
self.scale = self.scale.repeat(tmp)
self.zero = self.zero.repeat(tmp)
if weight:
shape = [-1] + [1] * (len(shape) - 1)
self.scale = self.scale.reshape(shape)
self.zero = self.zero.reshape(shape)
return
if len(shape) == 4:
self.scale = self.scale.reshape((1, -1, 1, 1))
self.zero = self.zero.reshape((1, -1, 1, 1))
if len(shape) == 3:
self.scale = self.scale.reshape((1, 1, -1))
self.zero = self.zero.reshape((1, 1, -1))
if len(shape) == 2:
self.scale = self.scale.unsqueeze(0)
self.zero = self.zero.unsqueeze(0)
def quantize(self, x):
if self.ready():
return self._quantize(x, self.scale, self.zero, self.maxq)
return x
def enabled(self):
return self.maxq > 0
def ready(self):
return torch.all(self.scale != 0)
class GPTQ:
def __init__(self, layer, observe=False):
self.layer = layer
self.dev = self.layer.weight.device
W = layer.weight.data.clone()
if isinstance(self.layer, nn.Conv2d):
W = W.flatten(1)
if isinstance(self.layer, transformers.Conv1D):
W = W.t()
self.rows = W.shape[0]
self.columns = W.shape[1]
self.H = torch.zeros((self.columns, self.columns), device=self.dev)
self.nsamples = 0
self.quantizer = Quantizer()
self.observe = observe
def add_batch(self, inp, out):
# Hessian H = 2 X XT + λ I
if self.observe:
self.inp1 = inp
self.out1 = out
else:
self.inp1 = None
self.out1 = None
if len(inp.shape) == 2:
inp = inp.unsqueeze(0)
tmp = inp.shape[0]
if isinstance(self.layer, nn.Linear) or isinstance(
self.layer, transformers.Conv1D
):
if len(inp.shape) == 3:
inp = inp.reshape((-1, inp.shape[-1]))
inp = inp.t()
if isinstance(self.layer, nn.Conv2d):
unfold = nn.Unfold(
self.layer.kernel_size,
dilation=self.layer.dilation,
padding=self.layer.padding,
stride=self.layer.stride,
)
inp = unfold(inp)
inp = inp.permute([1, 0, 2])
inp = inp.flatten(1)
self.H *= self.nsamples / (self.nsamples + tmp)
self.nsamples += tmp
# inp = inp.float()
inp = math.sqrt(2 / self.nsamples) * inp.float()
# self.H += 2 / self.nsamples * inp.matmul(inp.t())
self.H += inp.matmul(inp.t())
def print_loss(self, name, q_weight, weight_error, timecost):
table = Texttable()
length = 28
name = (
(name + " " * (length - len(name)))
if len(name) <= length
else name[:length]
)
table.header(["name", "weight_error", "fp_inp_SNR", "q_inp_SNR", "time"])
# assign weight
self.layer.weight.data = q_weight.reshape(self.layer.weight.shape).to(
self.layer.weight.data.dtype
)
if self.inp1 is not None:
# quantize input to int8
quantizer = Quantizer()
quantizer.configure(8, perchannel=False, sym=True, mse=False)
quantizer.find_params(self.inp1)
q_in = quantizer.quantize(self.inp1).type(torch.float16)
q_out = self.layer(q_in)
# get kinds of SNR
q_SNR = torch_snr_error(q_out, self.out1).item()
fp_SNR = torch_snr_error(self.layer(self.inp1), self.out1).item()
else:
q_SNR = "-"
fp_SNR = "-"
table.add_row([name, weight_error, fp_SNR, q_SNR, timecost])
print(table.draw().split("\n")[-2])
def fasterquant(
self, blocksize=128, percdamp=0.01, groupsize=-1, act_order=False, name=""
):
self.layer.to(self.dev)
W = self.layer.weight.data.clone()
if isinstance(self.layer, nn.Conv2d):
W = W.flatten(1)
if isinstance(self.layer, transformers.Conv1D):
W = W.t()
W = W.float()
tick = time.time()
if not self.quantizer.ready():
self.quantizer.find_params(W, weight=True)
H = self.H
if not self.observe:
del self.H
dead = torch.diag(H) == 0
H[dead, dead] = 1
W[:, dead] = 0
if act_order:
perm = torch.argsort(torch.diag(H), descending=True)
W = W[:, perm]
H = H[perm][:, perm]
Losses = torch.zeros_like(W)
Q = torch.zeros_like(W)
damp = percdamp * torch.mean(torch.diag(H))
diag = torch.arange(self.columns, device=self.dev)
H[diag, diag] += damp
H = torch.linalg.cholesky(H)
H = torch.cholesky_inverse(H)
try:
H = torch.linalg.cholesky(H, upper=True)
except Exception:
# Addition because Falcon fails on h_to_4h
H = torch.linalg.cholesky(
H + 1e-5 * torch.eye(H.shape[0]).to(H.device), upper=True
)
Hinv = H
g_idx = []
scale = []
zero = []
now_idx = 1
for i1 in range(0, self.columns, blocksize):
i2 = min(i1 + blocksize, self.columns)
count = i2 - i1
W1 = W[:, i1:i2].clone()
Q1 = torch.zeros_like(W1)
Err1 = torch.zeros_like(W1)
Losses1 = torch.zeros_like(W1)
Hinv1 = Hinv[i1:i2, i1:i2]
for i in range(count):
w = W1[:, i]
d = Hinv1[i, i]
if groupsize != -1:
if (i1 + i) % groupsize == 0:
self.quantizer.find_params(
W[:, (i1 + i) : (i1 + i + groupsize)], weight=True
)
if ((i1 + i) // groupsize) - now_idx == -1:
scale.append(self.quantizer.scale)
zero.append(self.quantizer.zero)
now_idx += 1
q = self.quantizer.quantize(w.unsqueeze(1)).flatten()
Q1[:, i] = q
Losses1[:, i] = (w - q) ** 2 / d**2
err1 = (w - q) / d
W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0))
Err1[:, i] = err1
Q[:, i1:i2] = Q1
Losses[:, i1:i2] = Losses1 / 2
W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:])
torch.cuda.synchronize()
error = torch.sum(Losses).item()
groupsize = groupsize if groupsize != -1 else self.columns
g_idx = [i // groupsize for i in range(self.columns)]
g_idx = torch.tensor(g_idx, dtype=torch.int32, device=Q.device)
if act_order:
invperm = torch.argsort(perm)
Q = Q[:, invperm]
g_idx = g_idx[invperm]
if isinstance(self.layer, transformers.Conv1D):
Q = Q.t()
self.print_loss(
name=name, q_weight=Q, weight_error=error, timecost=(time.time() - tick)
)
if scale == []:
scale.append(self.quantizer.scale)
zero.append(self.quantizer.zero)
scale = torch.cat(scale, dim=1)
zero = torch.cat(zero, dim=1)
return scale, zero, g_idx, error
def free(self):
self.inp1 = None
self.out1 = None
self.H = None
self.Losses = None
self.Trace = None
torch.cuda.empty_cache()
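# Rough call order used by `sequential()` further below (added summary, not upstream text):
#   gptq = GPTQ(layer); gptq.quantizer.configure(bits, perchannel=True, sym=sym, mse=False)
#   gptq.add_batch(inp, out)   # accumulate the Hessian from calibration activations
#   scale, zero, g_idx, error = gptq.fasterquant(groupsize=groupsize, act_order=act_order)
#   gptq.free()                # release the Hessian and temporary buffers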
def get_wikitext2(nsamples, seed, seqlen, model_id, trust_remote_code):
from datasets import load_dataset
traindata = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
testdata = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")
try:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=False, trust_remote_code=trust_remote_code
)
except Exception:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=True, trust_remote_code=trust_remote_code
)
trainenc = tokenizer("\n\n".join(traindata["text"]), return_tensors="pt")
testenc = tokenizer("\n\n".join(testdata["text"]), return_tensors="pt")
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
return trainloader, testenc
def get_ptb(nsamples, seed, seqlen, model_id, trust_remote_code):
from datasets import load_dataset
traindata = load_dataset("ptb_text_only", "penn_treebank", split="train")
valdata = load_dataset("ptb_text_only", "penn_treebank", split="validation")
try:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=False, trust_remote_code=trust_remote_code
)
except Exception:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=True, trust_remote_code=trust_remote_code
)
trainenc = tokenizer("\n\n".join(traindata["sentence"]), return_tensors="pt")
testenc = tokenizer("\n\n".join(valdata["sentence"]), return_tensors="pt")
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
return trainloader, testenc
def get_c4(nsamples, seed, seqlen, model_id, trust_remote_code):
from datasets import load_dataset
traindata = load_dataset(
"allenai/c4",
"allenai--c4",
data_files={"train": "en/c4-train.00000-of-01024.json.gz"},
split="train",
use_auth_token=False,
)
valdata = load_dataset(
"allenai/c4",
"allenai--c4",
data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"},
split="validation",
use_auth_token=False,
)
try:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=False, trust_remote_code=trust_remote_code
)
except Exception:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=True, trust_remote_code=trust_remote_code
)
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
while True:
i = random.randint(0, len(traindata) - 1)
trainenc = tokenizer(traindata[i]["text"], return_tensors="pt")
if trainenc.input_ids.shape[1] >= seqlen:
break
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
import random
random.seed(0)
valenc = []
for _ in range(256):
while True:
i = random.randint(0, len(valdata) - 1)
tmp = tokenizer(valdata[i]["text"], return_tensors="pt")
if tmp.input_ids.shape[1] >= seqlen:
break
i = random.randint(0, tmp.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
valenc.append(tmp.input_ids[:, i:j])
valenc = torch.hstack(valenc)
class TokenizerWrapper:
def __init__(self, input_ids):
self.input_ids = input_ids
valenc = TokenizerWrapper(valenc)
return trainloader, valenc
def get_ptb_new(nsamples, seed, seqlen, model_id, trust_remote_code):
from datasets import load_dataset
traindata = load_dataset("ptb_text_only", "penn_treebank", split="train")
testdata = load_dataset("ptb_text_only", "penn_treebank", split="test")
try:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=False, trust_remote_code=trust_remote_code
)
except Exception:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=True, trust_remote_code=trust_remote_code
)
trainenc = tokenizer(" ".join(traindata["sentence"]), return_tensors="pt")
testenc = tokenizer(" ".join(testdata["sentence"]), return_tensors="pt")
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
return trainloader, testenc
def get_c4_new(nsamples, seed, seqlen, model_id, trust_remote_code):
from datasets import load_dataset
traindata = load_dataset(
"allenai/c4",
"allenai--c4",
data_files={"train": "en/c4-train.00000-of-01024.json.gz"},
split="train",
)
valdata = load_dataset(
"allenai/c4",
"allenai--c4",
data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"},
split="validation",
)
try:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=False, trust_remote_code=trust_remote_code
)
except Exception:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=True, trust_remote_code=trust_remote_code
)
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
while True:
i = random.randint(0, len(traindata) - 1)
trainenc = tokenizer(traindata[i]["text"], return_tensors="pt")
if trainenc.input_ids.shape[1] >= seqlen:
break
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
valenc = tokenizer(" ".join(valdata[:1100]["text"]), return_tensors="pt")
valenc = valenc.input_ids[:, : (256 * seqlen)]
class TokenizerWrapper:
def __init__(self, input_ids):
self.input_ids = input_ids
valenc = TokenizerWrapper(valenc)
return trainloader, valenc
def get_loaders(
name, nsamples=128, seed=0, seqlen=2048, model_id="", trust_remote_code=False
):
if "wikitext2" in name:
return get_wikitext2(nsamples, seed, seqlen, model_id, trust_remote_code)
if "ptb" in name:
if "new" in name:
return get_ptb_new(nsamples, seed, seqlen, model_id, trust_remote_code)
return get_ptb(nsamples, seed, seqlen, model_id, trust_remote_code)
if "c4" in name:
if "new" in name:
return get_c4_new(nsamples, seed, seqlen, model_id, trust_remote_code)
return get_c4(nsamples, seed, seqlen, model_id, trust_remote_code)
def find_layers(module, layers=(nn.Conv2d, nn.Linear), name=""):
# Skip last lm_head linear
    # Need isinstance because Falcon inherits from Linear.
if isinstance(module, layers) and "lm_head" not in name:
return {name: module}
res = {}
for name1, child in module.named_children():
res.update(
find_layers(
child, layers=layers, name=name + "." + name1 if name != "" else name1
)
)
return res
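# Example of the returned mapping (illustrative; module names depend on the architecture):
#   {"self_attn.q_proj": nn.Linear(...), "mlp.gate_proj": nn.Linear(...), ...}
# i.e. every Linear/Conv2d leaf keyed by its dotted name, with any `lm_head` module skipped.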
@torch.no_grad()
def sequential(
model,
dataloader,
dev,
nsamples,
bits,
groupsize,
*,
hooks,
percdamp=0.01,
sym: bool = False,
act_order: bool = False,
):
print("Starting ...")
use_cache = model.config.use_cache
model.config.use_cache = False
try:
layers = model.model.layers
prefix = "model.layers"
except Exception:
layers = model.transformer.h
prefix = "transformer.h"
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros(
(nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev
)
cache = {"i": 0}
extra = {}
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache["i"]] = inp
cache["i"] += 1
extra.update(kwargs.copy())
raise ValueError
layers[0] = Catcher(layers[0])
for batch in dataloader:
try:
model(batch[0].cuda())
except ValueError:
pass
layers[0] = layers[0].module
# layers[0] = layers[0].cpu()
# model.model.embed_tokens = model.model.embed_tokens.cpu()
# model.model.norm = model.model.norm.cpu()
torch.cuda.empty_cache()
for hook in hooks:
hook.remove()
outs = torch.zeros_like(inps)
extra = {
k: v.to(dev) if isinstance(v, torch.Tensor) else v for k, v in extra.items()
}
print("Ready.")
quantizers = {}
for i in range(len(layers)):
print(f"Quantizing layer {i+1}/{len(layers)}..")
print("+------------------+--------------+------------+-----------+-------+")
print("| name | weight_error | fp_inp_SNR | q_inp_SNR | time |")
print("+==================+==============+============+===========+=======+")
layer = layers[i]
layer.load()
full = find_layers(layer)
sequential = [list(full.keys())]
for names in sequential:
subset = {n: full[n] for n in names}
gptq = {}
for name in subset:
gptq[name] = GPTQ(subset[name])
gptq[name].quantizer.configure(
bits, perchannel=True, sym=sym, mse=False
)
pass
def add_batch(name):
nonlocal gptq
def tmp(_, inp, out):
gptq[name].add_batch(inp[0].data, out.data)
return tmp
handles = []
for name in subset:
handles.append(subset[name].register_forward_hook(add_batch(name)))
for j in range(nsamples):
outs[j] = layer(inps[j].unsqueeze(0), **extra)[0]
for h in handles:
h.remove()
for name in subset:
scale, zero, g_idx, error = gptq[name].fasterquant(
percdamp=percdamp,
groupsize=groupsize,
act_order=act_order,
name=name,
)
quantizers[f"{prefix}.{i}.{name}"] = (
gptq[name].quantizer.cpu(),
scale.cpu(),
zero.cpu(),
g_idx.cpu(),
bits,
groupsize,
)
gptq[name].free()
for j in range(nsamples):
outs[j] = layer(inps[j].unsqueeze(0), **extra)[0]
layer.unload()
del layer
del gptq
torch.cuda.empty_cache()
inps, outs = outs, inps
print("+------------------+--------------+------------+-----------+-------+")
print("\n")
model.config.use_cache = use_cache
return quantizers
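# Note (added): the returned dict is keyed by the full parameter prefix, e.g.
# "model.layers.0.self_attn.q_proj", and maps to
# (quantizer, scale, zero, g_idx, bits, groupsize); `pack()` below consumes it.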
def make_quant_linear(module, names, bits, groupsize, name=""):
if isinstance(module, QuantLinear):
return
for attr in dir(module):
tmp = getattr(module, attr)
name1 = name + "." + attr if name != "" else attr
if name1 in names:
delattr(module, attr)
setattr(
module,
attr,
QuantLinear.new(
bits,
groupsize,
tmp.in_features,
tmp.out_features,
tmp.bias is not None,
),
)
for name1, child in module.named_children():
make_quant_linear(
child, names, bits, groupsize, name + "." + name1 if name != "" else name1
)
# TODO: perform packing on GPU
def pack(model, quantizers, bits, groupsize):
layers = find_layers(model)
layers = {n: layers[n] for n in quantizers}
make_quant_linear(model, quantizers, bits, groupsize)
qlayers = find_layers(model, (QuantLinear,))
print("Packing ...")
for name in qlayers:
print(name)
quantizers[name], scale, zero, g_idx, _, _ = quantizers[name]
qlayers[name].pack(layers[name], scale, zero, g_idx)
print("Done.")
return model
def setdeepattr(module, full_name, tensor):
current = module
tokens = full_name.split(".")
for token in tokens[:-1]:
current = getattr(current, token)
setattr(current, tokens[-1], tensor)
def getdeepattr(module, full_name):
current = module
tokens = full_name.split(".")
for token in tokens:
current = getattr(current, token)
return current
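# Illustrative sketch (added): the two helpers above walk dotted attribute paths, e.g.
#   m = nn.Sequential(nn.Linear(4, 4))
#   setdeepattr(m, "0.weight", nn.Parameter(torch.zeros(4, 4)))
#   assert getdeepattr(m, "0.weight").shape == (4, 4)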
def load_weights_pre_hook(module_name, weights, recursive=False):
def inner(module, args):
print(f"Pre hook {module_name}")
local_params = {}
for k, v in module.named_parameters():
if not recursive and k.count(".") != 1:
continue
local_params[k] = v
for k, v in module.named_buffers():
if not recursive and k.count(".") != 1:
continue
local_params[k] = v
for local_param in local_params:
current_tensor = getdeepattr(module, local_param)
if current_tensor.device == torch.device("meta"):
# print(f"Loading {local_param}")
if module_name:
tensor_name = f"{module_name}.{local_param}"
else:
tensor_name = local_param
tensor = weights.get_tensor(tensor_name)
setdeepattr(module, local_param, nn.Parameter(tensor))
else:
tensor = current_tensor.to(device=torch.device("cuda:0"))
if current_tensor.requires_grad:
tensor = nn.Parameter(tensor)
setdeepattr(module, local_param, tensor)
return inner
def load_weights_post_hook(module_name, weights, recursive=False):
def inner(module, args, output):
print(f"Post hook {module_name}")
local_params = {}
for k, v in module.named_parameters():
if not recursive and k.count(".") != 1:
continue
local_params[k] = v
for k, v in module.named_buffers():
if not recursive and k.count(".") != 1:
continue
local_params[k] = v
for local_param in local_params:
# print(f"Unloading {local_param}")
current_tensor = getdeepattr(module, local_param)
setdeepattr(
module,
local_param,
nn.Parameter(current_tensor.to(device=torch.device("cpu"))),
)
return output
return inner
def quantize(
model_id: str,
bits: int,
groupsize: int,
output_dir: str,
revision: str,
trust_remote_code: bool,
upload_to_model_id: Optional[str],
percdamp: float,
act_order: bool,
sym: bool,
):
print("loading model")
config = AutoConfig.from_pretrained(
model_id,
trust_remote_code=trust_remote_code,
)
with init_empty_weights():
model = AutoModelForCausalLM.from_config(
config, torch_dtype=torch.float16, trust_remote_code=trust_remote_code
)
model = model.eval()
print("LOADED model")
files = weight_files(model_id, revision, extension=".safetensors")
process_group, _, _ = initialize_torch_distributed()
weights = Weights(
files,
device=torch.device("cuda:0"),
dtype=torch.float16,
process_group=process_group,
aliases={"embed_tokens.weight": ["lm_head.weight"]},
weights_loader=DefaultWeightsLoader(UnquantizedWeight),
)
hooks = []
for name, module in model.named_modules():
def load(module, name):
def _load():
load_weights_pre_hook(name, weights, recursive=True)(module, None)
return _load
def unload(module, name):
def _unload():
load_weights_post_hook(name, weights, recursive=True)(
module, None, None
)
return _unload
module.load = load(module, name)
module.unload = unload(module, name)
hooks.append(
module.register_forward_pre_hook(load_weights_pre_hook(name, weights))
)
hooks.append(
module.register_forward_hook(load_weights_post_hook(name, weights))
)
model.seqlen = 2048
dataset = "wikitext2"
nsamples = 128
seed = None
dataloader, testloader = get_loaders(
dataset,
nsamples=nsamples,
seed=seed,
model_id=model_id,
seqlen=model.seqlen,
trust_remote_code=trust_remote_code,
)
tick = time.time()
quantizers = sequential(
model,
dataloader,
DEV,
nsamples,
bits,
groupsize,
percdamp=percdamp,
act_order=act_order,
hooks=hooks,
sym=sym,
)
print(time.time() - tick)
pack(model, quantizers, bits, groupsize)
from safetensors.torch import save_file
from huggingface_hub import split_torch_state_dict_into_shards
state_dict = model.state_dict()
state_dict = {k: v.cpu().contiguous() for k, v in state_dict.items()}
max_shard_size = "10GB"
state_dict_split = split_torch_state_dict_into_shards(
state_dict,
filename_pattern="model.safetensors",
max_shard_size=max_shard_size,
)
index = None
if state_dict_split.is_sharded:
index = {
"metadata": state_dict_split.metadata,
"weight_map": state_dict_split.tensor_to_filename,
}
shards = state_dict_split.filename_to_tensors
os.makedirs(output_dir, exist_ok=True)
for shard_file, shard in shards.items():
save_file(
shard,
os.path.join(output_dir, shard_file),
metadata={
"format": "pt",
"quantized": "gptq",
"origin": "text-generation-inference",
},
)
if index is None:
path_to_weights = os.path.join(output_dir, "model.safetensors")
logger.info(f"Model weights saved in {path_to_weights}")
else:
save_index_file = "model.safetensors.index.json"
save_index_file = os.path.join(output_dir, save_index_file)
with open(save_index_file, "w", encoding="utf-8") as f:
content = json.dumps(index, indent=2, sort_keys=True) + "\n"
f.write(content)
logger.info(
f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be "
f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
f"index located at {save_index_file}."
)
config = AutoConfig.from_pretrained(model_id, trust_remote_code=trust_remote_code)
config.quantization_config = {
"bits": bits,
"group_size": groupsize,
"damp_percent": percdamp,
"desc_act": act_order,
"static_groups": False,
"sym": sym,
"quant_method": "gptq",
}
config.save_pretrained(output_dir)
logger.info("Saved config")
logger.info("Saving tokenizer")
tokenizer = AutoTokenizer.from_pretrained(
model_id, trust_remote_code=trust_remote_code
)
tokenizer.save_pretrained(output_dir)
logger.info("Saved tokenizer")
if upload_to_model_id:
api = HfApi()
api.upload_folder(
folder_path=output_dir, repo_id=upload_to_model_id, repo_type="model"
)
| text-generation-inference/server/text_generation_server/layers/gptq/quantize.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/gptq/quantize.py",
"repo_id": "text-generation-inference",
"token_count": 16305
} |
from dataclasses import dataclass
from typing import List, Optional
import torch
import torch.nn as nn
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.utils.weights import Weights
from text_generation_server.layers.marlin.gptq import (
GPTQMarlinWeight,
GPTQMarlinWeightsLoader,
)
if SYSTEM == "cuda":
from moe_kernels.fused_marlin_moe import fused_marlin_moe
else:
fused_marlin_moe = None
try:
major, _minor = torch.cuda.get_device_capability()
has_sm_8_0 = major >= 8
except Exception:
has_sm_8_0 = False
def can_use_marlin_moe_gemm(
*,
quant_method: str,
quantize: str,
sym: bool,
):
return (
SYSTEM == "cuda"
and fused_marlin_moe is not None
and has_sm_8_0
and quantize in {"awq", "gptq"}
and quant_method in {"awq", "gptq"}
# We only support asymmetric quantization for AWQ.
and (sym or quant_method == "awq")
)
@dataclass
class GPTQMarlinMoEWeight:
qweight: torch.Tensor
qzeros: torch.Tensor
scales: torch.Tensor
g_idx: torch.Tensor
perm: torch.Tensor
is_full_k: bool
class GPTQMarlinSparseMoELayer(nn.Module):
"""
MoE layer that uses a fused GPTQ-Marlin kernel.
"""
def __init__(
self,
*,
n_expert_group: Optional[int],
n_experts: int,
prefix: str,
renormalize: bool,
topk: int,
topk_group: Optional[int],
weights: Weights,
gate_proj_name: str = "gate_proj",
up_proj_name: str = "up_proj",
down_proj_name: str = "down_proj",
scoring_func: Optional[str] = None,
e_score_correction_bias: Optional[float] = None,
):
assert scoring_func == "softmax", f"scoring func {scoring_func} is not handled"
assert e_score_correction_bias is None, "scoring correction bias is not handled"
super().__init__()
if not (
isinstance(weights.loader, GPTQMarlinWeightsLoader)
and can_use_marlin_moe_gemm(
quant_method=weights.loader.quant_method,
quantize=weights.loader.quantize,
sym=weights.loader.sym,
)
):
raise ValueError(
f"Unsupported weights loader: {type(weights.loader)}, only GPTQMarlinWeightsLoader with AWQ and symmetric GPTQ quantization is supported"
)
assert (n_expert_group is None) == (
topk_group is None
), "n_expert_group and topk_group must both be None or have some value"
self.n_expert_group = n_expert_group
self.topk = topk
self.topk_group = topk_group
self.renormalize = renormalize
self.gate_up_proj = _load_expert_multi_weights_col(
prefix=prefix,
n_experts=n_experts,
names=[gate_proj_name, up_proj_name],
weights=weights,
)
self.down_proj = _load_expert_weights_row(
prefix=prefix, n_experts=n_experts, name=down_proj_name, weights=weights
)
self.bits = weights.loader.bits
def forward(self, x: torch.Tensor, *, gating_output: torch.Tensor) -> torch.Tensor:
return fused_marlin_moe(
hidden_states=x,
w1=self.gate_up_proj.qweight,
w2=self.down_proj.qweight,
w1_scale=self.gate_up_proj.scales,
w2_scale=self.down_proj.scales,
w1_zeros=(
self.gate_up_proj.qzeros
if self.gate_up_proj.qzeros.numel() > 0
else None
),
w2_zeros=(
self.down_proj.qzeros if self.down_proj.qzeros.numel() > 0 else None
),
g_idx1=self.gate_up_proj.g_idx,
g_idx2=self.down_proj.g_idx,
sort_indices1=self.gate_up_proj.perm,
sort_indices2=self.down_proj.perm,
is_k_full=self.gate_up_proj.is_full_k or self.down_proj.is_full_k,
gating_output=gating_output,
topk=self.topk,
renormalize=self.renormalize,
use_grouped_topk=self.n_expert_group is not None,
num_expert_group=self.n_expert_group,
topk_group=self.topk_group,
num_bits=self.bits,
)
def _load_expert_multi_weights_col(
*,
prefix: str,
n_experts: int,
names: List[str],
weights: Weights,
) -> GPTQMarlinMoEWeight:
moe_weight = None
for i in range(n_experts):
weight = weights.get_multi_weights_col(
[f"{prefix}.{i}.{name}" for name in names], 0
)
assert isinstance(weight, GPTQMarlinWeight)
moe_weight = _pack_weight(
n_experts=n_experts, expert=i, weight=weight, moe_weight=moe_weight
)
assert moe_weight is not None
return moe_weight
def _load_expert_weights_row(
*,
prefix: str,
n_experts: int,
name: str,
weights: Weights,
) -> GPTQMarlinMoEWeight:
moe_weight = None
for i in range(n_experts):
weight = weights.get_weights_row(
f"{prefix}.{i}.{name}",
)
assert isinstance(weight, GPTQMarlinWeight)
moe_weight = _pack_weight(
n_experts=n_experts, expert=i, weight=weight, moe_weight=moe_weight
)
assert moe_weight is not None
return moe_weight
def _pack_weight(
*,
n_experts: int,
expert: int,
moe_weight: Optional[GPTQMarlinMoEWeight],
weight: GPTQMarlinWeight,
) -> GPTQMarlinMoEWeight:
if moe_weight is None:
qweight = torch.empty(
(n_experts,) + weight.qweight.shape,
dtype=weight.qweight.dtype,
device=weight.qweight.device,
)
qzeros = torch.empty(
(n_experts,) + weight.qzeros.shape,
dtype=weight.qzeros.dtype,
device=weight.qzeros.device,
)
scales = torch.empty(
(n_experts,) + weight.scales.shape,
dtype=weight.scales.dtype,
device=weight.scales.device,
)
g_idx = torch.empty(
(n_experts,) + weight.g_idx.shape,
dtype=weight.g_idx.dtype,
device=weight.g_idx.device,
)
perm = torch.empty(
(n_experts,) + weight.perm.shape,
dtype=weight.perm.dtype,
device=weight.perm.device,
)
moe_weight = GPTQMarlinMoEWeight(
qweight=qweight,
qzeros=qzeros,
scales=scales,
g_idx=g_idx,
perm=perm,
is_full_k=weight.is_full_k,
)
moe_weight.qweight[expert] = weight.qweight
moe_weight.qzeros[expert] = weight.qzeros
moe_weight.scales[expert] = weight.scales
moe_weight.g_idx[expert] = weight.g_idx
moe_weight.perm[expert] = weight.perm
return moe_weight
| text-generation-inference/server/text_generation_server/layers/moe/gptq_marlin.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/moe/gptq_marlin.py",
"repo_id": "text-generation-inference",
"token_count": 3509
} |
def load_text_model(prefix, config, weights, name=None):
if config.model_type == "llama":
from text_generation_server.models.custom_modeling.flash_llama_modeling import (
FlashLlamaForCausalLM,
)
return FlashLlamaForCausalLM(prefix, config, weights, name=name)
elif config.model_type == "mistral":
from text_generation_server.models.custom_modeling.flash_mistral_modeling import (
FlashMistralForCausalLM,
)
return FlashMistralForCausalLM(prefix, config, weights, name=name)
elif config.model_type == "gemma":
from text_generation_server.models.custom_modeling.flash_gemma_modeling import (
FlashGemmaForCausalLM,
)
return FlashGemmaForCausalLM(prefix, config, weights, causal=False)
elif config.model_type == "gemma2":
from text_generation_server.models.custom_modeling.flash_gemma2_modeling import (
FlashGemma2ForCausalLM,
)
return FlashGemma2ForCausalLM(prefix, config, weights)
elif config.model_type == "paligemma":
from text_generation_server.models.custom_modeling.flash_gemma_modeling import (
FlashGemmaForCausalLM,
)
return FlashGemmaForCausalLM(prefix, config, weights)
else:
raise RuntimeError(f"Unsupported model type {config.model_type}")
def load_vision_model(prefix, config, weights):
if config.model_type == "clip_vision_model":
from text_generation_server.models.custom_modeling.clip import (
CLIPVisionTransformer,
)
return CLIPVisionTransformer(
prefix=f"{prefix}.vision_model", config=config, weights=weights
)
if config.model_type == "siglip_vision_model":
from text_generation_server.models.custom_modeling.siglip import (
SiglipVisionTransformer,
)
return SiglipVisionTransformer(
prefix="vision_tower.vision_model", config=config, weights=weights
)
else:
raise RuntimeError(f"Unsupported model type {config.model_type}")
| text-generation-inference/server/text_generation_server/models/custom_modeling/vlm.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/vlm.py",
"repo_id": "text-generation-inference",
"token_count": 868
} |
import grpc
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.instrumentation.grpc._aio_server import (
OpenTelemetryAioServerInterceptor,
)
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
BatchSpanProcessor,
)
class UDSOpenTelemetryAioServerInterceptor(OpenTelemetryAioServerInterceptor):
def __init__(self):
super().__init__(trace.get_tracer(__name__))
def _start_span(self, handler_call_details, context, set_status_on_exception=False):
"""
Rewrite _start_span method to support Unix Domain Socket gRPC contexts
"""
# standard attributes
attributes = {
SpanAttributes.RPC_SYSTEM: "grpc",
SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.OK.value[0],
}
# if we have details about the call, split into service and method
if handler_call_details.method:
service, method = handler_call_details.method.lstrip("/").split("/", 1)
attributes.update(
{
SpanAttributes.RPC_METHOD: method,
SpanAttributes.RPC_SERVICE: service,
}
)
# add some attributes from the metadata
metadata = dict(context.invocation_metadata())
if "user-agent" in metadata:
attributes["rpc.user_agent"] = metadata["user-agent"]
# We use gRPC over a UNIX socket
attributes.update({SpanAttributes.NET_TRANSPORT: "unix"})
return self._tracer.start_as_current_span(
name=handler_call_details.method,
kind=trace.SpanKind.SERVER,
attributes=attributes,
set_status_on_exception=set_status_on_exception,
)
def setup_tracing(otlp_service_name: str, otlp_endpoint: str):
resource = Resource.create(attributes={"service.name": otlp_service_name})
span_exporter = OTLPSpanExporter(endpoint=otlp_endpoint, insecure=True)
span_processor = BatchSpanProcessor(span_exporter)
trace.set_tracer_provider(TracerProvider(resource=resource))
trace.get_tracer_provider().add_span_processor(span_processor)
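# Example wiring (added; the endpoint is an assumption, 4317 being the default OTLP/gRPC
# port of a local collector):
#   setup_tracing(otlp_service_name="text-generation-server", otlp_endpoint="http://localhost:4317")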
| text-generation-inference/server/text_generation_server/tracing.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/tracing.py",
"repo_id": "text-generation-inference",
"token_count": 969
} |
# Origin: https://github.com/predibase/lorax
# Path: lorax/server/lorax_server/utils/sgmv.py
# License: Apache License Version 2.0, January 2004
import os
import warnings
from functools import lru_cache
from typing import List, Tuple
import torch
import torch.nn.functional as F
try:
import punica_kernels as _kernels
HAS_SGMV = not bool(os.environ.get("DISABLE_SGMV", ""))
except ImportError:
warnings.warn("Could not import SGMV kernel from Punica, falling back to loop.")
_kernels = None
HAS_SGMV = False
MIN_SGMV_RANK = 8
MIN_RANK_CUSTOM = 16
MAX_RANK_CUSTOM = 128
SGMV_BLOCK_SIZE = 16
BGMV_MAX_RANK = 64
def has_sgmv() -> bool:
return HAS_SGMV
def pad_rank(t: torch.Tensor, dim: int, world_size: int) -> torch.Tensor:
"""Pad a tensor to the minimum rank for SGMV and the nearest multiple of the SGMV block size."""
if not has_sgmv():
return t
# tensor parallelism will result in effective rank being divided by world_size,
# so we need to scale the min rank to offset that effect
min_rank = MIN_SGMV_RANK * world_size
# if we're at or below the min rank, pad up to the min rank
# otherwise, pad to the nearest multiple of the block size
current_rank = t.size(dim)
target_rank = (
min_rank
if current_rank <= min_rank
else (current_rank + SGMV_BLOCK_SIZE - 1) // SGMV_BLOCK_SIZE * SGMV_BLOCK_SIZE
)
if current_rank == target_rank:
return t
pad_size = target_rank - current_rank
    # see the complicated pad syntax here: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
pad = [0, 0] * t.dim()
pad[(t.dim() - dim - 1) * 2 + 1] = pad_size
pad = tuple(pad)
return F.pad(t, pad, mode="constant", value=0.0)
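# Worked example (illustrative): with world_size=1 the minimum rank is 8, so a LoRA rank
# of 4 is padded up to 8, while a rank of 10 is padded to the next multiple of
# SGMV_BLOCK_SIZE, i.e. 16; ranks already on a boundary are returned unchanged.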
def use_cutlass_shrink(lora_rank: int) -> bool:
return lora_rank < MIN_RANK_CUSTOM
def orient_for_rank(t: torch.Tensor, rank: int) -> torch.Tensor:
if MIN_RANK_CUSTOM <= rank <= MAX_RANK_CUSTOM:
return t.transpose(0, 1)
return t
# Source: https://github.com/punica-ai/punica/blob/master/src/punica/ops/__init__.py
def add_lora_sgmv_cutlass(
y: torch.Tensor,
x: torch.Tensor,
wa_ptr: torch.Tensor,
wb_ptr: torch.Tensor,
s_start: torch.Tensor,
s_end: torch.Tensor,
layer_idx: int,
lora_rank: int,
):
"""
Semantics:
y[s[i]:s[i+1]] += x[s[i]:s[i+1]] @ deref(wa_ptr[i]).T @ deref(wb_ptr[i])
Args:
y: Shape: `[B, H2]`. Output vectors. Will be changed in-place.
x: Shape: `[B, H1]`. Input vectors.
wa_ptr: Shape: `[S]`. DType: torch.int64. Pointer to the weight matrices.\
Weight matrix shape: `[num_layers, R, H1]`.
wb_ptr: Shape: `[S]`. DType: torch.int64. Pointer to the weight matrices.\
Weight matrix shape: `[num_layers, R, H2]`.
s_start: Shape: `[S]`, DType: torch.int32. Indptr of the weight matrices start indices.
s_end: Shape: `[S]`, DType: torch.int32. Indptr of the weight matrices end indices.
layer_idx: Layer index of the weight matrices.
"""
if lora_rank < MIN_RANK_CUSTOM or lora_rank > MAX_RANK_CUSTOM:
# Custom SGMV shrink only supports rank 16, 32, 64, 128
_add_lora_sgmv_cutlass_legacy(
y, x, wa_ptr, wb_ptr, s_start, s_end, layer_idx, lora_rank
)
return
tmp1 = torch.empty((8 * 1024 * 1024,), dtype=torch.uint8, device=x.device)
tmp2_size = _kernels.sgmv_cutlass_tmp_size(wa_ptr.size(0))
tmp2 = torch.empty((tmp2_size,), dtype=torch.uint8, device=x.device)
v = torch.zeros((x.size(0), lora_rank), dtype=x.dtype, device=x.device)
_kernels.sgmv_shrink(v, x, wa_ptr, s_start, s_end, tmp1, layer_idx)
_kernels.sgmv_cutlass(y, v, wb_ptr, s_start, s_end, tmp2, layer_idx)
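# Note (added): the function above is a two-step SGMV: `sgmv_shrink` projects x into the
# low-rank space (v has shape [B, lora_rank]) and `sgmv_cutlass` expands v back into y,
# which is updated in place.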
def _add_lora_sgmv_cutlass_legacy(
y: torch.Tensor,
x: torch.Tensor,
wa_ptr: torch.Tensor,
wb_ptr: torch.Tensor,
s_start: torch.IntTensor,
s_end: torch.IntTensor,
layer_idx: int,
lora_rank: int,
):
tmp_size = _kernels.sgmv_cutlass_tmp_size(wa_ptr.size(0))
tmp = torch.empty((tmp_size,), dtype=torch.uint8, device=x.device)
v = torch.zeros((x.size(0), lora_rank), dtype=x.dtype, device=x.device)
_kernels.sgmv_cutlass(v, x, wa_ptr, s_start, s_end, tmp, layer_idx)
_kernels.sgmv_cutlass(y, v, wb_ptr, s_start, s_end, tmp, layer_idx)
@lru_cache(maxsize=1)
def get_tmp_tensor(device: torch.device) -> torch.Tensor:
return torch.empty((8 * 1024 * 1024,), dtype=torch.uint8, device=device)
@lru_cache(maxsize=32)
def get_tmp_tensor_for_size(size: int, device: torch.device) -> torch.Tensor:
tmp_size = _kernels.sgmv_cutlass_tmp_size(size)
return torch.empty((tmp_size,), dtype=torch.uint8, device=device)
def get_tmp_tensor_for_size_no_kernels(size: int, device: torch.device) -> torch.Tensor:
return torch.empty((size,), dtype=torch.uint8, device=device)
def get_tmp_expand_size(size: int) -> int:
return _kernels.sgmv_cutlass_tmp_size(size)
def get_tmp_tensors(
nsegments: int, lora_rank: int, device: torch.device
) -> Tuple[torch.Tensor, torch.Tensor]:
use_cutlass = use_cutlass_shrink(lora_rank) and has_sgmv()
has_sgmv_available = has_sgmv()
if use_cutlass:
tmp = get_tmp_tensor_for_size(nsegments, device)
return tmp, tmp
elif has_sgmv_available:
return get_tmp_tensor(device), get_tmp_tensor_for_size(nsegments, device)
else:
        # No SGMV kernels are available on this path, so avoid calling into `_kernels`
        tmp = get_tmp_tensor_for_size_no_kernels(nsegments, device)
return tmp, tmp
def lora_a_sgmv_cutlass(
x: torch.Tensor,
tmp: torch.Tensor,
wa_ptr: torch.Tensor,
s_start: torch.IntTensor,
s_end: torch.IntTensor,
layer_idx: int,
lora_rank: int,
) -> torch.Tensor:
v = torch.zeros((x.size(0), lora_rank), dtype=x.dtype, device=x.device)
if MIN_RANK_CUSTOM <= lora_rank <= MAX_RANK_CUSTOM:
_kernels.sgmv_shrink(v, x, wa_ptr, s_start, s_end, tmp, layer_idx)
else:
_kernels.sgmv_cutlass(v, x, wa_ptr, s_start, s_end, tmp, layer_idx)
return v
def lora_b_sgmv_cutlass(
y: torch.Tensor,
v: torch.Tensor,
tmp: torch.Tensor,
wb_ptr: torch.Tensor,
s_start: torch.IntTensor,
s_end: torch.IntTensor,
layer_idx: int,
):
_kernels.sgmv_cutlass(y, v, wb_ptr, s_start, s_end, tmp, layer_idx)
"""
Semantics:
y[i] += (
x[i].unsqueeze(0)
@ wa_T_all[indices[i], layer_idx, :, :].transpose(-1, -2)
@ wb_T_all[indices[i], layer_idx, :, :].transpose(-1, -2)
* scale
).squeeze(0)
Args:
y: Shape: `[B, H2]`. Output vectors. Will be changed in-place.
v: Shape: `[B, R]`. Temporary vector.
x: Shape: `[B, H1]`. Input vectors.
wa_T_all: Shape: `[None, L, R, H1]`. All of the transposed LoRA A matrices.
wb_T_all: Shape: `[None, L, H2, R]`. All of the transposed LoRA B matrices.
indicies: Shape: `[B]`. Indices of the LoRA weights.
layer_idx: Layer index of LoRA weights.
scale: Scaling factor.
"""
def add_lora_a_bgmv(
v: torch.Tensor,
x: torch.Tensor,
wa_T_all: torch.Tensor,
indicies: torch.LongTensor,
layer_idx: int,
):
_kernels.dispatch_bgmv(v, x, wa_T_all, indicies, layer_idx, 1.0)
def add_lora_b_bgmv(
y: torch.Tensor,
v: torch.Tensor,
wb_T_all: torch.Tensor,
indicies: torch.LongTensor,
layer_idx: int,
):
_kernels.dispatch_bgmv(y, v, wb_T_all, indicies, layer_idx, 1.0)
def segmented_matmul(
y: torch.Tensor,
x: torch.Tensor,
w: List[torch.Tensor],
b: List[torch.Tensor],
s_start: torch.IntTensor,
s_end: torch.IntTensor,
):
for i in range(len(w)):
if s_end[i] - s_start[i] <= 0:
continue
xi = x[s_start[i] : s_end[i]]
wi = w[i]
bi = b[i]
y[s_start[i] : s_end[i]] = F.linear(xi, wi, bi)
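# Note (added): `segmented_matmul` is a plain PyTorch per-segment loop with no custom
# kernels; every non-empty slice x[s_start[i]:s_end[i]] is multiplied with its own
# weight w[i] and bias b[i] through F.linear.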
| text-generation-inference/server/text_generation_server/utils/sgmv.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/sgmv.py",
"repo_id": "text-generation-inference",
"token_count": 3651
} |
extern crate tokenizers as tk;
use crate::encoding::*;
use crate::tokenizer::Tokenizer;
use napi::bindgen_prelude::*;
use tk::tokenizer::{EncodeInput, Encoding};
pub struct EncodeTask<'s> {
pub tokenizer: Tokenizer,
pub input: Option<EncodeInput<'s>>,
pub add_special_tokens: bool,
}
impl Task for EncodeTask<'static> {
type Output = Encoding;
type JsValue = JsEncoding;
fn compute(&mut self) -> Result<Self::Output> {
self
.tokenizer
.tokenizer
.read()
.unwrap()
.encode_char_offsets(
self
.input
.take()
.ok_or(Error::from_reason("No provided input"))?,
self.add_special_tokens,
)
.map_err(|e| Error::from_reason(format!("{}", e)))
}
fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> {
Ok(JsEncoding {
encoding: Some(output),
})
}
}
pub struct DecodeTask {
pub tokenizer: Tokenizer,
pub ids: Vec<u32>,
pub skip_special_tokens: bool,
}
impl Task for DecodeTask {
type Output = String;
type JsValue = String;
fn compute(&mut self) -> Result<Self::Output> {
self
.tokenizer
.tokenizer
.read()
.unwrap()
.decode(&self.ids, self.skip_special_tokens)
.map_err(|e| Error::from_reason(format!("{}", e)))
}
fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> {
Ok(output)
}
}
pub struct EncodeBatchTask<'s> {
pub tokenizer: Tokenizer,
pub inputs: Option<Vec<EncodeInput<'s>>>,
pub add_special_tokens: bool,
}
impl Task for EncodeBatchTask<'static> {
type Output = Vec<Encoding>;
type JsValue = Vec<JsEncoding>;
fn compute(&mut self) -> Result<Self::Output> {
self
.tokenizer
.tokenizer
.read()
.unwrap()
.encode_batch_char_offsets(
self
.inputs
.take()
.ok_or(Error::from_reason("No provided input"))?,
self.add_special_tokens,
)
.map_err(|e| Error::from_reason(format!("{}", e)))
}
fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> {
Ok(
output
.into_iter()
.map(|encoding| JsEncoding {
encoding: Some(encoding),
})
.collect(),
)
}
}
pub struct DecodeBatchTask {
pub tokenizer: Tokenizer,
pub ids: Vec<Vec<u32>>,
pub skip_special_tokens: bool,
}
impl Task for DecodeBatchTask {
type Output = Vec<String>;
type JsValue = Vec<String>;
fn compute(&mut self) -> Result<Self::Output> {
let ids: Vec<_> = self.ids.iter().map(|s| s.as_slice()).collect();
self
.tokenizer
.tokenizer
.read()
.unwrap()
.decode_batch(&ids, self.skip_special_tokens)
.map_err(|e| Error::from_reason(format!("{}", e)))
}
fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> {
Ok(output)
}
}
| tokenizers/bindings/node/src/tasks/tokenizer.rs/0 | {
"file_path": "tokenizers/bindings/node/src/tasks/tokenizer.rs",
"repo_id": "tokenizers",
"token_count": 1295
} |
from typing import List
import jieba
from tokenizers import NormalizedString, PreTokenizedString, Regex, Tokenizer
from tokenizers.decoders import Decoder
from tokenizers.models import BPE
from tokenizers.normalizers import Normalizer
from tokenizers.pre_tokenizers import PreTokenizer
class JiebaPreTokenizer:
def jieba_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
splits = []
# we need to call `str(normalized_string)` because jieba expects a str,
# not a NormalizedString
for token, start, stop in jieba.tokenize(str(normalized_string)):
splits.append(normalized_string[start:stop])
return splits
# We can also easily do it in one line:
# return [normalized_string[w[1] : w[2]] for w in jieba.tokenize(str(normalized_string))]
def odd_number_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
# Just an odd example...
splits = []
last = 0
for i, char in enumerate(str(normalized_string)):
if char.isnumeric() and int(char) % 2 == 1:
splits.append(normalized_string[last:i])
last = i
# Don't forget the last one
splits.append(normalized_string[last:])
return splits
def pre_tokenize(self, pretok: PreTokenizedString):
# Let's call split on the PreTokenizedString to split using `self.jieba_split`
pretok.split(self.jieba_split)
# Here we can call `pretok.split` multiple times if we want to apply
        # different algorithms, but we generally just need to call it once.
pretok.split(self.odd_number_split)
class CustomDecoder:
def decode(self, tokens: List[str]) -> str:
return "".join(tokens)
class CustomNormalizer:
def normalize(self, normalized: NormalizedString):
# Most of these can be replaced by a `Sequence` combining some provided Normalizer,
        # (i.e. Sequence([NFKC(), Replace(Regex("\s+"), " "), Lowercase()]))
# and it should be the preferred way. That being said, here is an example of the kind
# of things that can be done here:
normalized.nfkc()
normalized.filter(lambda char: not char.isnumeric())
normalized.replace(Regex("\s+"), " ")
normalized.lowercase()
# This section shows how to attach these custom components to the Tokenizer
tok = Tokenizer(BPE())
tok.normalizer = Normalizer.custom(CustomNormalizer())
tok.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer())
tok.decoder = Decoder.custom(CustomDecoder())
input = "æ°žåæè£
饰åæéå
¬åž"
print("PreTokenize:", input)
print(tok.pre_tokenizer.pre_tokenize_str(input))
# [('æ°žå', (0, 2)), ('æè£
', (2, 4)), ('饰å', (4, 6)), ('æéå
¬åž', (6, 10))]
input = "112233"
print("PreTokenize:", input)
print(tok.pre_tokenizer.pre_tokenize_str(input))
# [('1', (0, 1)), ('122', (1, 4)), ('3', (4, 5)), ('3', (5, 6))]
input = "1234 âð¢ð©ð©ð¬ ð±ð¥ð¢ð¯ð¢ ðð ð¹â¯ð¶ð ðððð£ ðð£ðððð!"
print("Normalize:", input)
print(tok.normalizer.normalize_str(input))
# " hello there my dear dear friend!"
| tokenizers/bindings/python/examples/custom_components.py/0 | {
"file_path": "tokenizers/bindings/python/examples/custom_components.py",
"repo_id": "tokenizers",
"token_count": 1292
} |
import json
import os
from typing import Iterator, List, Optional, Union, Tuple
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.models import Unigram
from .base_tokenizer import BaseTokenizer
class SentencePieceUnigramTokenizer(BaseTokenizer):
"""SentencePiece Unigram Tokenizer
Represents the Unigram algorithm, with the pretokenization used by SentencePiece
"""
def __init__(
self,
vocab: Optional[List[Tuple[str, float]]] = None,
replacement: str = "â",
add_prefix_space: bool = True,
):
if vocab is not None:
# Let Unigram(..) fail if only one of them is None
tokenizer = Tokenizer(Unigram(vocab))
else:
tokenizer = Tokenizer(Unigram())
tokenizer.normalizer = normalizers.Sequence(
[normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " ")]
)
prepend_scheme = "always" if add_prefix_space else "never"
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
parameters = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(tokenizer, parameters)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 8000,
show_progress: bool = True,
special_tokens: Optional[List[Union[str, AddedToken]]] = None,
initial_alphabet: Optional[List[str]] = None,
unk_token: Optional[str] = None,
):
"""
Train the model using the given files
Args:
files (:obj:`List[str]`):
A list of path to the files that we should use for training
vocab_size (:obj:`int`):
The size of the final vocabulary, including all tokens and alphabet.
show_progress (:obj:`bool`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
A list of special tokens the model should know of.
initial_alphabet (:obj:`List[str]`, `optional`):
A list of characters to include in the initial alphabet, even
if not seen in the training dataset.
If the strings contain more than one character, only the first one
is kept.
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
"""
if special_tokens is None:
special_tokens = []
if initial_alphabet is None:
initial_alphabet = []
trainer = trainers.UnigramTrainer(
vocab_size=vocab_size,
special_tokens=special_tokens,
show_progress=show_progress,
initial_alphabet=initial_alphabet,
unk_token=unk_token,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 8000,
show_progress: bool = True,
special_tokens: Optional[List[Union[str, AddedToken]]] = None,
initial_alphabet: Optional[List[str]] = None,
unk_token: Optional[str] = None,
length: Optional[int] = None,
):
"""
Train the model using the given iterator
Args:
iterator (:obj:`Union[Iterator[str], Iterator[Iterator[str]]]`):
Any iterator over strings or list of strings
vocab_size (:obj:`int`):
The size of the final vocabulary, including all tokens and alphabet.
show_progress (:obj:`bool`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
A list of special tokens the model should know of.
initial_alphabet (:obj:`List[str]`, `optional`):
A list of characters to include in the initial alphabet, even
if not seen in the training dataset.
If the strings contain more than one character, only the first one
is kept.
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
length (:obj:`int`, `optional`):
The total number of sequences in the iterator. This is used to
provide meaningful progress tracking
"""
if special_tokens is None:
special_tokens = []
if initial_alphabet is None:
initial_alphabet = []
trainer = trainers.UnigramTrainer(
vocab_size=vocab_size,
special_tokens=special_tokens,
show_progress=show_progress,
initial_alphabet=initial_alphabet,
unk_token=unk_token,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
@staticmethod
def from_spm(filename: str):
try:
import sys
sys.path.append(".")
import sentencepiece_model_pb2 as model
except Exception:
raise Exception(
"You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required."
)
m = model.ModelProto()
m.ParseFromString(open(filename, "rb").read())
precompiled_charsmap = m.normalizer_spec.precompiled_charsmap
vocab = [(piece.piece, piece.score) for piece in m.pieces]
unk_id = m.trainer_spec.unk_id
model_type = m.trainer_spec.model_type
byte_fallback = m.trainer_spec.byte_fallback
if model_type != 1:
raise Exception(
"You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
)
replacement = "▁"
add_prefix_space = True
tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback))
if precompiled_charsmap:
tokenizer.normalizer = normalizers.Sequence(
[
normalizers.Precompiled(precompiled_charsmap),
normalizers.Replace(Regex(" {2,}"), " "),
]
)
else:
tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(" {2,}"), " ")])
prepend_scheme = "always" if add_prefix_space else "never"
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
parameters = {
"model": "SentencePieceUnigram",
}
obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters)
BaseTokenizer.__init__(obj, tokenizer, parameters)
return obj
| tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py",
"repo_id": "tokenizers",
"token_count": 3405
} |
import transformers
from tokenizers.implementations import SentencePieceUnigramTokenizer, BaseTokenizer
from tokenizers.processors import TemplateProcessing
from tokenizers.models import Unigram, BPE
from tokenizers import decoders
from tokenizers import Tokenizer, Regex
from tokenizers.normalizers import (
StripAccents,
NFKD,
Lowercase,
Sequence,
BertNormalizer,
Precompiled,
Replace,
)
from tokenizers.pre_tokenizers import (
Digits,
WhitespaceSplit,
Metaspace,
Sequence as PSequence,
)
import json
import unicodedata
import sys
import os
import datetime
import argparse
sys.path.append(".")
from spm_parity_check import check_details
from sentencepiece_extractor import SentencePieceExtractor
def check_number_comma(piece: str) -> bool:
return len(piece) < 2 or piece[-1] != "," or not piece[-2].isdigit()
def get_proto(filename: str):
try:
import sys
sys.path.append(".")
import sentencepiece_model_pb2 as model
except Exception:
raise Exception(
"You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required."
)
m = model.ModelProto()
m.ParseFromString(open(filename, "rb").read())
return m
class Converter:
def __init__(self, original_tokenizer):
self.original_tokenizer = original_tokenizer
def converted(self) -> Tokenizer:
raise NotImplementedError()
class SpmConverter(Converter):
def __init__(self, *args):
super().__init__(*args)
self.proto = get_proto(self.original_tokenizer.vocab_file)
def vocab(self, proto):
return [(piece.piece, piece.score) for piece in proto.pieces]
def unk_id(self, proto):
return proto.trainer_spec.unk_id
def tokenizer(self, proto):
model_type = proto.trainer_spec.model_type
vocab = self.vocab(proto)
unk_id = self.unk_id(proto)
if model_type == 1:
tokenizer = Tokenizer(Unigram(vocab, unk_id))
elif model_type == 2:
vocab, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract()
tokenizer = Tokenizer(BPE(vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True))
else:
raise Exception(
"You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
)
return tokenizer
def normalizer(self, proto):
precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
return Sequence([Precompiled(precompiled_charsmap), Replace(Regex(" {2,}"), " ")])
def post_processor(self, tokenizer):
return None
def converted(self):
tokenizer = self.tokenizer(self.proto)
# Tokenizer assemble
tokenizer.normalizer = self.normalizer(self.proto)
replacement = "▁"
prepend_scheme = "always"
tokenizer.pre_tokenizer = Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
post_processor = self.post_processor(tokenizer)
if post_processor:
tokenizer.post_processor = post_processor
# TODO what parameters should we give ?
parameters = {}
return BaseTokenizer(tokenizer, parameters)
class AlbertConverter(SpmConverter):
def vocab(self, proto):
return [
(piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100)
for piece in proto.pieces
]
def normalizer(self, proto):
normalizers = [Replace("``", '"'), Replace("''", '"')]
if not self.original_tokenizer.keep_accents:
normalizers.append(NFKD())
normalizers.append(StripAccents())
if self.original_tokenizer.do_lower_case:
normalizers.append(Lowercase())
precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
normalizers.append(Precompiled(precompiled_charsmap))
normalizers.append(Replace(Regex(" {2,}"), " "))
return Sequence(normalizers)
def post_processor(self, tokenizer):
return TemplateProcessing(
seq_a=["[CLS]", "$0", "[SEP]"],
seq_b=["$1", "[SEP]"],
special_tokens=[
("[CLS]", tokenizer.get_vocab()["[CLS]"]),
("[SEP]", tokenizer.get_vocab()["[SEP]"]),
],
)
class CamembertConverter(SpmConverter):
def vocab(self, proto):
vocab = [
("<s>NOTUSED", 0.0),
("<pad>", 0.0),
("</s>NOTUSED", 0.0),
("<unk>", 0.0),
]
vocab += [(piece.piece, piece.score) for piece in proto.pieces]
return vocab
def unk_id(self, proto):
# See vocab unk position
return 3
def post_processor(self, tokenizer):
return TemplateProcessing(
seq_a=["<s>", "$0", "</s>"],
seq_b=["$1", "</s>"],
special_tokens=[
("<s>", tokenizer.get_vocab()["<s>"]),
("</s>", tokenizer.get_vocab()["</s>"]),
],
)
class MBartConverter(SpmConverter):
def vocab(self, proto):
vocab = [
("<s>", 0.0),
("<pad>", 0.0),
("</s>", 0.0),
("<unk>", 0.0),
]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
vocab += [
("ar_AR", 0.0),
("cs_CZ", 0.0),
("de_DE", 0.0),
("en_XX", 0.0),
("es_XX", 0.0),
("et_EE", 0.0),
("fi_FI", 0.0),
("fr_XX", 0.0),
("gu_IN", 0.0),
("hi_IN", 0.0),
("it_IT", 0.0),
("ja_XX", 0.0),
("kk_KZ", 0.0),
("ko_KR", 0.0),
("lt_LT", 0.0),
("lv_LV", 0.0),
("my_MM", 0.0),
("ne_NP", 0.0),
("nl_XX", 0.0),
("ro_RO", 0.0),
("ru_RU", 0.0),
("si_LK", 0.0),
("tr_TR", 0.0),
("vi_VN", 0.0),
("zh_CN", 0.0),
]
return vocab
def unk_id(self, proto):
return 3
def post_processor(self, tokenizer):
return TemplateProcessing(
seq_a=["$0", "</s>", "en_XX"],
seq_b=["$1", "</s>"],
special_tokens=[
("en_XX", tokenizer.get_vocab()["en_XX"]),
("</s>", tokenizer.get_vocab()["</s>"]),
],
)
class XLMRobertaConverter(SpmConverter):
def vocab(self, proto):
vocab = [
("<s>", 0.0),
("<pad>", 0.0),
("</s>", 0.0),
("<unk>", 0.0),
]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
return vocab
def unk_id(self, proto):
unk_id = 3
return unk_id
def post_processor(self, tokenizer):
return TemplateProcessing(
seq_a=["<s>", "$0", "</s>"],
seq_b=["$1", "</s>"],
special_tokens=[
("<s>", tokenizer.get_vocab()["<s>"]),
("</s>", tokenizer.get_vocab()["</s>"]),
],
)
class XLNetConverter(SpmConverter):
def vocab(self, proto):
return [
(piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100)
for piece in proto.pieces
]
def normalizer(self, proto):
normalizers = [Replace("``", '"'), Replace("''", '"')]
if not self.original_tokenizer.keep_accents:
normalizers.append(NFKD())
normalizers.append(StripAccents())
if self.original_tokenizer.do_lower_case:
normalizers.append(Lowercase())
precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
normalizers.append(Precompiled(precompiled_charsmap))
normalizers.append(Replace(Regex(" {2,}"), " "))
return Sequence(normalizers)
def post_processor(self, tokenizer):
return TemplateProcessing(
seq_a=["$0", "<sep>", "<cls>"],
seq_b=["$1", "<sep>"],
special_tokens=[
("<sep>", tokenizer.get_vocab()["<sep>"]),
("<cls>", tokenizer.get_vocab()["<cls>"]),
],
)
class ReformerConverter(SpmConverter):
pass
class PegasusConverter(SpmConverter):
offset = 103
def vocab(self, proto):
vocab = [
(self.original_tokenizer.pad_token, 0),
(self.original_tokenizer.eos_token, 0),
]
vocab += [(f"unk_{i}", -100) for i in range(2, 2 + self.offset)]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]]
return vocab
def unk_id(self, proto):
return proto.trainer_spec.unk_id + self.offset
def post_processor(self, tokenizer):
eos = self.original_tokenizer.eos_token
return TemplateProcessing(
seq_a=["$0", eos],
seq_b=["$1", eos],
special_tokens=[(eos, tokenizer.get_vocab()[eos])],
)
class T5Converter(SpmConverter):
def post_processor(self, tokenizer):
return TemplateProcessing(
seq_a=["$0", "</s>"],
seq_b=["$1", "</s>"],
special_tokens=[("</s>", tokenizer.get_vocab()["</s>"])],
)
CONVERTERS = {
"AlbertTokenizer": AlbertConverter,
"CamembertTokenizer": CamembertConverter,
"XLMRobertaTokenizer": XLMRobertaConverter,
"MBartTokenizer": MBartConverter,
"XLNetTokenizer": XLNetConverter,
"ReformerTokenizer": ReformerConverter,
"PegasusTokenizer": PegasusConverter,
"T5Tokenizer": T5Converter,
}
def check(pretrained, filename):
transformer_tokenizer = transformers.AutoTokenizer.from_pretrained(pretrained)
converter_class = CONVERTERS[transformer_tokenizer.__class__.__name__]
tokenizer = converter_class(transformer_tokenizer).converted()
now = datetime.datetime.now
trans_total_time = datetime.timedelta(seconds=0)
tok_total_time = datetime.timedelta(seconds=0)
with open(filename, "r") as f:
for i, line in enumerate(f):
line = line.strip()
start = now()
ids = transformer_tokenizer.encode(line)
trans = now()
tok_ids = tokenizer.encode(line).ids
tok = now()
trans_total_time += trans - start
tok_total_time += tok - trans
if ids != tok_ids:
if check_details(line, ids, tok_ids, transformer_tokenizer, tokenizer):
continue
assert ids == tok_ids, f"Error in line {i}: {line} {ids} != {tok_ids}"
tokenizer.save(f"{pretrained.replace('/', '-')}.json")
return ("OK", trans_total_time / tok_total_time)
def main():
pretraineds = [
"albert-base-v1",
"albert-large-v1",
"albert-xlarge-v1",
"albert-xxlarge-v1",
"albert-base-v2",
"albert-large-v2",
"albert-xlarge-v2",
"albert-xxlarge-v2",
"camembert-base",
"xlm-roberta-base",
"xlm-roberta-large",
"xlm-roberta-large-finetuned-conll02-dutch",
"xlm-roberta-large-finetuned-conll02-spanish",
"xlm-roberta-large-finetuned-conll03-english",
"xlm-roberta-large-finetuned-conll03-german",
"facebook/mbart-large-en-ro",
"facebook/mbart-large-cc25",
"xlnet-base-cased",
"xlnet-large-cased",
"google/reformer-crime-and-punishment",
"t5-small",
"google/pegasus-large",
]
parser = argparse.ArgumentParser()
parser.add_argument(
"--filename",
required=True,
type=str,
help="The filename that we are going to encode in both versions to check that conversion worked",
)
parser.add_argument(
"--models",
type=lambda s: s.split(","),
default=pretraineds,
help=f"The pretrained tokenizers you want to test agains, (default: {pretraineds})",
)
args = parser.parse_args()
print(args.filename)
model_len = 50
status_len = 6
speedup_len = 8
print(f"|{'Model':^{model_len}}|{'Status':^{status_len}}|{'Speedup':^{speedup_len}}|")
print(f"|{'-'*model_len}|{'-'*status_len}|{'-'*speedup_len}|")
for pretrained in args.models:
status, speedup = check(pretrained, args.filename)
print(f"|{pretrained:<{model_len}}|{status:^{status_len}}|{speedup:^{speedup_len - 1}.2f}x|")
if __name__ == "__main__":
main()
| tokenizers/bindings/python/scripts/convert.py/0 | {
"file_path": "tokenizers/bindings/python/scripts/convert.py",
"repo_id": "tokenizers",
"token_count": 6304
} |
use std::marker::PhantomData;
use std::sync::{Arc, Mutex};
mod iterators;
mod normalization;
mod pretokenization;
mod regex;
pub mod serde_pyo3;
pub use iterators::*;
pub use normalization::*;
pub use pretokenization::*;
pub use regex::*;
// RefMut utils
pub trait DestroyPtr {
fn destroy(&mut self);
}
pub struct RefMutGuard<'r, T: DestroyPtr> {
content: T,
r: PhantomData<&'r mut T>,
}
impl<T: DestroyPtr> RefMutGuard<'_, T> {
pub fn new(content: T) -> Self {
Self {
content,
r: PhantomData,
}
}
pub fn get(&self) -> &T {
&self.content
}
}
impl<T: DestroyPtr> Drop for RefMutGuard<'_, T> {
fn drop(&mut self) {
self.content.destroy()
}
}
#[derive(Clone)]
pub struct RefMutContainer<T> {
inner: Arc<Mutex<Option<*mut T>>>,
}
impl<T> RefMutContainer<T> {
pub fn new(content: &mut T) -> Self {
Self {
inner: Arc::new(Mutex::new(Some(content))),
}
}
pub fn map<F: FnOnce(&T) -> U, U>(&self, f: F) -> Option<U> {
let lock = self.inner.lock().unwrap();
let ptr = lock.as_ref()?;
Some(f(unsafe { ptr.as_ref().unwrap() }))
}
pub fn map_mut<F: FnOnce(&mut T) -> U, U>(&mut self, f: F) -> Option<U> {
let lock = self.inner.lock().unwrap();
let ptr = lock.as_ref()?;
Some(f(unsafe { ptr.as_mut().unwrap() }))
}
}
impl<T> DestroyPtr for RefMutContainer<T> {
fn destroy(&mut self) {
self.inner.lock().unwrap().take();
}
}
unsafe impl<T: Send> Send for RefMutContainer<T> {}
unsafe impl<T: Sync> Sync for RefMutContainer<T> {}
| tokenizers/bindings/python/src/utils/mod.rs/0 | {
"file_path": "tokenizers/bindings/python/src/utils/mod.rs",
"repo_id": "tokenizers",
"token_count": 752
} |
import copy
import os
import pickle
import pytest
from tokenizers import (
AddedToken,
SentencePieceUnigramTokenizer,
Tokenizer,
models,
normalizers,
pre_tokenizers,
trainers,
)
from ..utils import data_dir, train_files
class TestBpeTrainer:
def test_can_modify(self):
trainer = trainers.BpeTrainer(
vocab_size=12345,
min_frequency=12,
show_progress=False,
special_tokens=["1", "2"],
limit_alphabet=13,
initial_alphabet=["a", "b", "c"],
continuing_subword_prefix="pref",
end_of_word_suffix="suf",
)
assert trainer.vocab_size == 12345
assert trainer.min_frequency == 12
assert trainer.show_progress == False
assert trainer.special_tokens == [
AddedToken("1", special=True),
AddedToken("2", special=True),
]
assert trainer.limit_alphabet == 13
assert sorted(trainer.initial_alphabet) == ["a", "b", "c"]
assert trainer.continuing_subword_prefix == "pref"
assert trainer.end_of_word_suffix == "suf"
# Modify these
trainer.vocab_size = 20000
assert trainer.vocab_size == 20000
trainer.min_frequency = 1
assert trainer.min_frequency == 1
trainer.show_progress = True
assert trainer.show_progress == True
trainer.special_tokens = []
assert trainer.special_tokens == []
trainer.limit_alphabet = None
assert trainer.limit_alphabet == None
trainer.initial_alphabet = ["d", "z"]
assert sorted(trainer.initial_alphabet) == ["d", "z"]
trainer.continuing_subword_prefix = None
assert trainer.continuing_subword_prefix == None
trainer.end_of_word_suffix = None
assert trainer.end_of_word_suffix == None
def test_can_pickle(self):
assert (
trainers.BpeTrainer(min_frequency=12).__getstate__()
== b"""{"BpeTrainer":{"min_frequency":12,"vocab_size":30000,"show_progress":true,"special_tokens":[],"limit_alphabet":null,"initial_alphabet":[],"continuing_subword_prefix":null,"end_of_word_suffix":null,"max_token_length":null,"words":{}}}"""
)
assert isinstance(pickle.loads(pickle.dumps(trainers.BpeTrainer(min_frequency=12))), trainers.BpeTrainer)
assert isinstance(copy.deepcopy(trainers.BpeTrainer(min_frequency=12)), trainers.BpeTrainer)
# Make sure everything is correct
assert pickle.dumps(pickle.loads(pickle.dumps(trainers.BpeTrainer(min_frequency=12)))) == pickle.dumps(
trainers.BpeTrainer(min_frequency=12)
)
class TestWordPieceTrainer:
def test_can_modify(self):
trainer = trainers.WordPieceTrainer(
vocab_size=12345,
min_frequency=12,
show_progress=False,
special_tokens=["1", "2"],
limit_alphabet=13,
initial_alphabet=["a", "b", "c"],
continuing_subword_prefix="pref",
end_of_word_suffix="suf",
)
assert trainer.vocab_size == 12345
assert trainer.min_frequency == 12
assert trainer.show_progress == False
assert trainer.special_tokens == [
AddedToken("1", special=True),
AddedToken("2", special=True),
]
assert trainer.limit_alphabet == 13
assert sorted(trainer.initial_alphabet) == ["a", "b", "c"]
assert trainer.continuing_subword_prefix == "pref"
assert trainer.end_of_word_suffix == "suf"
# Modify these
trainer.vocab_size = 20000
assert trainer.vocab_size == 20000
trainer.min_frequency = 1
assert trainer.min_frequency == 1
trainer.show_progress = True
assert trainer.show_progress == True
trainer.special_tokens = []
assert trainer.special_tokens == []
trainer.limit_alphabet = None
assert trainer.limit_alphabet == None
trainer.initial_alphabet = ["d", "z"]
assert sorted(trainer.initial_alphabet) == ["d", "z"]
trainer.continuing_subword_prefix = None
assert trainer.continuing_subword_prefix == None
trainer.end_of_word_suffix = None
assert trainer.end_of_word_suffix == None
def test_can_pickle(self):
assert isinstance(pickle.loads(pickle.dumps(trainers.WordPieceTrainer())), trainers.WordPieceTrainer)
class TestWordLevelTrainer:
def test_can_modify(self):
trainer = trainers.WordLevelTrainer(
vocab_size=12345, min_frequency=12, show_progress=False, special_tokens=["1", "2"]
)
assert trainer.vocab_size == 12345
assert trainer.min_frequency == 12
assert trainer.show_progress == False
assert trainer.special_tokens == [
AddedToken("1", special=True),
AddedToken("2", special=True),
]
# Modify these
trainer.vocab_size = 20000
assert trainer.vocab_size == 20000
trainer.min_frequency = 1
assert trainer.min_frequency == 1
trainer.show_progress = True
assert trainer.show_progress == True
trainer.special_tokens = []
assert trainer.special_tokens == []
def test_can_pickle(self):
assert isinstance(pickle.loads(pickle.dumps(trainers.WordLevelTrainer())), trainers.WordLevelTrainer)
class TestUnigram:
def test_train(self, train_files):
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train(train_files["small"], show_progress=False)
filename = "tests/data/unigram_trained.json"
tokenizer.save(filename)
os.remove(filename)
def test_train_parallelism_with_custom_pretokenizer(self, train_files):
class GoodCustomPretok:
def split(self, n, normalized):
# Here we just test that we can return a List[NormalizedString], it
# does not really make sense to return twice the same otherwise
return [normalized, normalized]
def pre_tokenize(self, pretok):
pretok.split(self.split)
custom = pre_tokenizers.PreTokenizer.custom(GoodCustomPretok())
bpe_tokenizer = Tokenizer(models.BPE())
bpe_tokenizer.normalizer = normalizers.Lowercase()
bpe_tokenizer.pre_tokenizer = custom
if "TOKENIZERS_PARALLELISM" in os.environ:
del os.environ["TOKENIZERS_PARALLELISM"]
trainer = trainers.BpeTrainer(special_tokens=["<unk>"], show_progress=False)
bpe_tokenizer.train([train_files["small"]], trainer=trainer)
def test_can_pickle(self):
assert isinstance(pickle.loads(pickle.dumps(trainers.UnigramTrainer())), trainers.UnigramTrainer)
def test_train_with_special_tokens(self):
filename = "tests/data/dummy-unigram-special_tokens-train.txt"
with open(filename, "w") as f:
f.write(
"""
[CLS] The Zen of Python, by Tim Peters [SEP]
[CLS] Beautiful is better than ugly. [SEP]
[CLS] Explicit is better than implicit. [SEP]
[CLS] Simple is better than complex. [SEP]
[CLS] Complex is better than complicated. [SEP]
[CLS] Flat is better than nested. [SEP]
[CLS] Sparse is better than dense. [SEP]
[CLS] Readability counts. [SEP]
[CLS] Special cases aren't special enough to break the rules. [SEP]
[CLS] Although practicality beats purity. [SEP]
[CLS] Errors should never pass silently. [SEP]
[CLS] Unless explicitly silenced. [SEP]
[CLS] In the face of ambiguity, refuse the temptation to guess. [SEP]
[CLS] There should be one-- and preferably only one --obvious way to do it. [SEP]
[CLS] Although that way may not be obvious at first unless you're Dutch. [SEP]
[CLS] Now is better than never. [SEP]
[CLS] Although never is often better than *right* now. [SEP]
[CLS] If the implementation is hard to explain, it's a bad idea. [SEP]
[CLS] If the implementation is easy to explain, it may be a good idea. [SEP]
[CLS] Namespaces are one honking great idea -- let's do more of those! [SEP]
"""
)
tokenizer = Tokenizer(models.Unigram())
trainer = trainers.UnigramTrainer(
show_progress=False, special_tokens=["[PAD]", "[SEP]", "[CLS]"], unk_token="[UNK]"
)
tokenizer.train([filename], trainer=trainer)
assert tokenizer.encode("[CLS] This is a test [SEP]").tokens == [
"[CLS]",
" T",
"h",
"i",
"s",
" is ",
"a",
" ",
"te",
"s",
"t ",
"[SEP]",
]
tokenizer = Tokenizer(models.Unigram())
trainer = trainers.UnigramTrainer(
show_progress=False,
special_tokens=["[PAD]", "[SEP]", "[CLS]"],
unk_token="[UNK]",
vocab_size=100,
)
tokenizer.train([filename], trainer=trainer)
assert tokenizer.get_vocab_size() == 100
tokenizer = Tokenizer(models.Unigram())
trainer = trainers.UnigramTrainer(
show_progress=False,
special_tokens=["[PAD]", "[SEP]", "[CLS]", "[UNK]"],
unk_token="[UNK]",
vocab_size=100,
)
tokenizer.train([filename], trainer=trainer)
assert tokenizer.get_vocab_size() == 100
def test_cannot_train_different_model(self):
tokenizer = Tokenizer(models.BPE())
trainer = trainers.UnigramTrainer(show_progress=False)
with pytest.raises(Exception, match="UnigramTrainer can only train a Unigram"):
tokenizer.train([], trainer)
def test_can_modify(self):
trainer = trainers.UnigramTrainer(
vocab_size=12345,
show_progress=False,
special_tokens=["1", AddedToken("2", lstrip=True)],
initial_alphabet=["a", "b", "c"],
)
assert trainer.vocab_size == 12345
assert trainer.show_progress == False
assert trainer.special_tokens == [
AddedToken("1", normalized=False, special=True),
AddedToken("2", lstrip=True, normalized=False, special=True),
]
assert sorted(trainer.initial_alphabet) == ["a", "b", "c"]
# Modify these
trainer.vocab_size = 20000
assert trainer.vocab_size == 20000
trainer.show_progress = True
assert trainer.show_progress == True
trainer.special_tokens = []
assert trainer.special_tokens == []
trainer.initial_alphabet = ["d", "z"]
assert sorted(trainer.initial_alphabet) == ["d", "z"]
def test_continuing_prefix_trainer_mismatch(self):
UNK = "[UNK]"
special_tokens = [UNK]
tokenizer = Tokenizer(models.BPE(unk_token=UNK, continuing_subword_prefix="##"))
trainer = trainers.BpeTrainer(special_tokens=special_tokens)
tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
[pre_tokenizers.Whitespace(), pre_tokenizers.Digits(individual_digits=True)]
)
tokenizer.train(files=["data/big.txt"], trainer=trainer)
tokenizer.save("data/tokenizer.json")
tokenizer.from_file("data/tokenizer.json")
| tokenizers/bindings/python/tests/bindings/test_trainers.py/0 | {
"file_path": "tokenizers/bindings/python/tests/bindings/test_trainers.py",
"repo_id": "tokenizers",
"token_count": 4958
} |
# Added Tokens
<tokenizerslangcontent>
<python>
## AddedToken
[[autodoc]] tokenizers.AddedToken
- content
- lstrip
- normalized
- rstrip
- single_word
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | tokenizers/docs/source-doc-builder/api/added-tokens.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/api/added-tokens.mdx",
"repo_id": "tokenizers",
"token_count": 134
} |
# Quicktour
Let's have a quick look at the 🤗 Tokenizers library features. The
library provides an implementation of today's most used tokenizers that
is both easy to use and blazing fast.
## Build a tokenizer from scratch
To illustrate how fast the 🤗 Tokenizers library is, let's train a new
tokenizer on [wikitext-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/)
(516M of text) in just a few seconds. First things first, you will need
to download this dataset and unzip it with:
``` bash
wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip
unzip wikitext-103-raw-v1.zip
```
### Training the tokenizer
In this tour, we will build and train a Byte-Pair Encoding (BPE)
tokenizer. For more information about the different type of tokenizers,
check out this [guide](https://huggingface.co/transformers/tokenizer_summary.html) in
the 🤗 Transformers documentation. Here, training the tokenizer means it
will learn merge rules by:
- Start with all the characters present in the training corpus as
tokens.
- Identify the most common pair of tokens and merge it into one token.
- Repeat until the vocabulary (i.e., the number of tokens) has reached
the size we want.
The main API of the library is the `Tokenizer` class; here is how
we instantiate one with a BPE model:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START init_tokenizer",
"end-before": "END init_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_init_tokenizer",
"end-before": "END quicktour_init_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START init_tokenizer",
"end-before": "END init_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
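Since the snippet above is pulled in from an external test file, here is a minimal Python sketch of the same step (assuming the `tokenizers` package is installed and an `"[UNK]"` token for out-of-vocabulary characters):

```python
from tokenizers import Tokenizer
from tokenizers.models import BPE

# A Tokenizer wraps a model; here the model is BPE with an unknown token
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
```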
To train our tokenizer on the wikitext files, we will need to
instantiate a [trainer]{.title-ref}, in this case a
`BpeTrainer`
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START init_trainer",
"end-before": "END init_trainer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_init_trainer",
"end-before": "END quicktour_init_trainer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START init_trainer",
"end-before": "END init_trainer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
We can set the training arguments like `vocab_size` or `min_frequency` (here
left at their default values of 30,000 and 0) but the most important
part is to give the `special_tokens` we
plan to use later on (they are not used at all during training) so that
they get inserted in the vocabulary.
<Tip>
The order in which you write the special tokens list matters: here `"[UNK]"` will get the ID 0,
`"[CLS]"` will get the ID 1 and so forth.
</Tip>
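For reference, a hedged Python sketch of such a trainer (the special tokens listed here mirror the ones used in this tour and are otherwise just an example):

```python
from tokenizers.trainers import BpeTrainer

# Special tokens get their IDs from their position in this list: "[UNK]" -> 0, "[CLS]" -> 1, ...
trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
```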
We could train our tokenizer right now, but it wouldn't be optimal.
Without a pre-tokenizer that will split our inputs into words, we might
get tokens that overlap several words: for instance we could get an
`"it is"` token since those two words
often appear next to each other. Using a pre-tokenizer will ensure no
token is bigger than a word returned by the pre-tokenizer. Here we want
to train a subword BPE tokenizer, and we will use the easiest
pre-tokenizer possible by splitting on whitespace.
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START init_pretok",
"end-before": "END init_pretok",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_init_pretok",
"end-before": "END quicktour_init_pretok",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START init_pretok",
"end-before": "END init_pretok",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
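In Python, this boils down to a one-liner (a sketch, reusing the tokenizer from above):

```python
from tokenizers.pre_tokenizers import Whitespace

# Split the input on word boundaries before the BPE model sees it
tokenizer.pre_tokenizer = Whitespace()
```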
Now, we can just call the `Tokenizer.train` method with any list of files we want to use:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START train",
"end-before": "END train",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_train",
"end-before": "END quicktour_train",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START train",
"end-before": "END train",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
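As a rough sketch, assuming the unzipped wikitext files sit in a `wikitext-103-raw/` folder next to your script:

```python
# Train on the three raw wikitext splits; the file names are an assumption about your local layout
files = [f"wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
tokenizer.train(files, trainer)
```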
This should only take a few seconds to train our tokenizer on the full
wikitext dataset! To save the tokenizer in one file that contains all
its configuration and vocabulary, just use the
`Tokenizer.save` method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START save",
"end-before": "END save",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_save",
"end-before": "END quicktour_save",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START save",
"end-before": "END save",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
and you can reload your tokenizer from that file with the
`Tokenizer.from_file`
`classmethod`:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 12}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_reload_tokenizer",
"end-before": "END quicktour_reload_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
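A minimal sketch of the save/reload round trip (the file name is arbitrary):

```python
# Serialize the full tokenizer (model, normalizer, pre-tokenizer, vocabulary...) to one JSON file
tokenizer.save("tokenizer-wiki.json")

# ...and load it back later
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_file("tokenizer-wiki.json")
```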
### Using the tokenizer
Now that we have trained a tokenizer, we can use it on any text we want
with the `Tokenizer.encode` method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START encode",
"end-before": "END encode",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_encode",
"end-before": "END quicktour_encode",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START encode",
"end-before": "END encode",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
This applied the full pipeline of the tokenizer on the text, returning
an `Encoding` object. To learn more
about this pipeline, and how to apply (or customize) parts of it, check out [this page](pipeline).
This `Encoding` object then has all the
attributes you need for your deep learning model (or other). The
`tokens` attribute contains the
segmentation of your text in tokens:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_tokens",
"end-before": "END print_tokens",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_tokens",
"end-before": "END quicktour_print_tokens",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_tokens",
"end-before": "END print_tokens",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
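For instance, a sketch of encoding a sentence and inspecting its tokens (the exact output depends on the vocabulary your training produced):

```python
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
print(output.tokens)
# e.g. ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"]
```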
Similarly, the `ids` attribute will
contain the index of each of those tokens in the tokenizer's
vocabulary:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_ids",
"end-before": "END print_ids",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_ids",
"end-before": "END quicktour_print_ids",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_ids",
"end-before": "END print_ids",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
An important feature of the 🤗 Tokenizers library is that it comes with
full alignment tracking, meaning you can always get the part of your
original sentence that corresponds to a given token. Those are stored in
the `offsets` attribute of our
`Encoding` object. For instance, let's
assume we want to find out what caused the
`"[UNK]"` token to appear. It is the
token at index 9 in the list, so we can just ask for the offset at that
index:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_offsets",
"end-before": "END print_offsets",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_offsets",
"end-before": "END quicktour_print_offsets",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_offsets",
"end-before": "END print_offsets",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
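A sketch of that lookup, using the sentence from above (the exact offsets depend on your input):

```python
print(output.offsets[9])
# e.g. (26, 27), the slice of the original sentence that produced "[UNK]"
sentence = "Hello, y'all! How are you 😁 ?"
print(sentence[26:27])  # the emoji
```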
and those are the indices that correspond to the emoji in the original
sentence:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START use_offsets",
"end-before": "END use_offsets",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_use_offsets",
"end-before": "END quicktour_use_offsets",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START use_offsets",
"end-before": "END use_offsets",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
### Post-processing
We might want our tokenizer to automatically add special tokens, like
`"[CLS]"` or `"[SEP]"`. To do this, we use a post-processor.
`TemplateProcessing` is the most
commonly used; you just have to specify a template for the processing of
single sentences and pairs of sentences, along with the special tokens
and their IDs.
When we built our tokenizer, we set `"[CLS]"` and `"[SEP]"` in positions 1
and 2 of our list of special tokens, so this should be their IDs. To
double-check, we can use the `Tokenizer.token_to_id` method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START check_sep",
"end-before": "END check_sep",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_check_sep",
"end-before": "END quicktour_check_sep",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START check_sep",
"end-before": "END check_sep",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
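In Python this is simply (the value shown is what the special-token order above would give, not a guaranteed ID):

```python
print(tokenizer.token_to_id("[SEP]"))  # expected to be 2 with the special-token list used here
```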
Here is how we can set the post-processing to give us the traditional
BERT inputs:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START init_template_processing",
"end-before": "END init_template_processing",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_init_template_processing",
"end-before": "END quicktour_init_template_processing",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START init_template_processing",
"end-before": "END init_template_processing",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Let's go over this snippet of code in more detail. First we specify
the template for single sentences: those should have the form
`"[CLS] $A [SEP]"` where
`$A` represents our sentence.
Then, we specify the template for sentence pairs, which should have the
form `"[CLS] $A [SEP] $B [SEP]"` where
`$A` represents the first sentence and
`$B` the second one. The
`:1` added in the template represent the `type IDs` we want for each part of our input: it defaults
to 0 for everything (which is why we don't have
`$A:0`) and here we set it to 1 for the
tokens of the second sentence and the last `"[SEP]"` token.
Lastly, we specify the special tokens we used and their IDs in our
tokenizer's vocabulary.
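Putting it together, a hedged Python sketch of this post-processor (the IDs are looked up rather than hard-coded):

```python
from tokenizers.processors import TemplateProcessing

tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[
        ("[CLS]", tokenizer.token_to_id("[CLS]")),
        ("[SEP]", tokenizer.token_to_id("[SEP]")),
    ],
)
```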
To check that this worked properly, let's try to encode the same
sentence as before:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_special_tokens",
"end-before": "END print_special_tokens",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_special_tokens",
"end-before": "END quicktour_print_special_tokens",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_special_tokens",
"end-before": "END print_special_tokens",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
To check the results on a pair of sentences, we just pass the two
sentences to `Tokenizer.encode`:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_special_tokens_pair",
"end-before": "END print_special_tokens_pair",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_special_tokens_pair",
"end-before": "END quicktour_print_special_tokens_pair",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_special_tokens_pair",
"end-before": "END print_special_tokens_pair",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
You can then check that the type IDs attributed to each token are correct with
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_type_ids",
"end-before": "END print_type_ids",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_type_ids",
"end-before": "END quicktour_print_type_ids",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_type_ids",
"end-before": "END print_type_ids",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
If you save your tokenizer with `Tokenizer.save`, the post-processor will be saved along with it.
### Encoding multiple sentences in a batch
To get the full speed of the 🤗 Tokenizers library, it's best to
process your texts in batches, using the
`Tokenizer.encode_batch` method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START encode_batch",
"end-before": "END encode_batch",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_encode_batch",
"end-before": "END quicktour_encode_batch",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START encode_batch",
"end-before": "END encode_batch",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
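A sketch of a batched call (any list of strings works):

```python
output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
print(output[0].tokens)
```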
The output is then a list of `Encoding`
objects like the ones we saw before. You can process together as many
texts as you like, as long as it fits in memory.
To process a batch of sentence pairs, pass two lists to the
`Tokenizer.encode_batch` method: the
list of sentences A and the list of sentences B:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START encode_batch_pair",
"end-before": "END encode_batch_pair",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_encode_batch_pair",
"end-before": "END quicktour_encode_batch_pair",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START encode_batch_pair",
"end-before": "END encode_batch_pair",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
When encoding multiple sentences, you can automatically pad the outputs
to the longest sentence present by using
`Tokenizer.enable_padding`, with the
`pad_token` and its ID (we can
double-check the ID of the padding token with
`Tokenizer.token_to_id` like before):
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START enable_padding",
"end-before": "END enable_padding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_enable_padding",
"end-before": "END quicktour_enable_padding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START enable_padding",
"end-before": "END enable_padding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
We can set the `direction` of the padding
(defaults to the right) or a given `length` if we want to pad every sample to that specific number (here
we leave it unset to pad to the size of the longest text).
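A hedged sketch, assuming `"[PAD]"` sits at index 3 of the special-token list used when training:

```python
tokenizer.enable_padding(pad_id=3, pad_token="[PAD]")
output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
print(output[1].tokens)
# e.g. ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
```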
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_batch_tokens",
"end-before": "END print_batch_tokens",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_batch_tokens",
"end-before": "END quicktour_print_batch_tokens",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_batch_tokens",
"end-before": "END print_batch_tokens",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
In this case, the `attention mask` generated by the
tokenizer takes the padding into account:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_attention_mask",
"end-before": "END print_attention_mask",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_attention_mask",
"end-before": "END quicktour_print_attention_mask",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_attention_mask",
"end-before": "END print_attention_mask",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
## Pretrained
<tokenizerslangcontent>
<python>
### Using a pretrained tokenizer
You can load any tokenizer from the Hugging Face Hub as long as a
`tokenizer.json` file is available in the repository.
```python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
```
### Importing a pretrained tokenizer from legacy vocabulary files
You can also import a pretrained tokenizer directly, as long as you
have its vocabulary file. For instance, here is how to import the
classic pretrained BERT tokenizer:
```python
from tokenizers import BertWordPieceTokenizer
tokenizer = BertWordPieceTokenizer("bert-base-uncased-vocab.txt", lowercase=True)
```
as long as you have downloaded the file `bert-base-uncased-vocab.txt` with
```bash
wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt
```
</python>
</tokenizerslangcontent> | tokenizers/docs/source-doc-builder/quicktour.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/quicktour.mdx",
"repo_id": "tokenizers",
"token_count": 7936
} |
Components
====================================================================================================
When building a Tokenizer, you can attach various types of components to this Tokenizer in order
to customize its behavior. This page lists most provided components.
.. _normalizers:
.. entities:: python
BertNormalizer.clean_text
clean_text
BertNormalizer.handle_chinese_chars
handle_chinese_chars
BertNormalizer.strip_accents
strip_accents
BertNormalizer.lowercase
lowercase
Normalizer.Sequence
``Sequence([NFKC(), Lowercase()])``
PreTokenizer.Sequence
``Sequence([Punctuation(), WhitespaceSplit()])``
SplitDelimiterBehavior.removed
:obj:`removed`
SplitDelimiterBehavior.isolated
:obj:`isolated`
SplitDelimiterBehavior.merged_with_previous
:obj:`merged_with_previous`
SplitDelimiterBehavior.merged_with_next
:obj:`merged_with_next`
SplitDelimiterBehavior.contiguous
:obj:`contiguous`
.. entities:: rust
BertNormalizer.clean_text
clean_text
BertNormalizer.handle_chinese_chars
handle_chinese_chars
BertNormalizer.strip_accents
strip_accents
BertNormalizer.lowercase
lowercase
Normalizer.Sequence
``Sequence::new(vec![NFKC, Lowercase])``
PreTokenizer.Sequence
``Sequence::new(vec![Punctuation, WhitespaceSplit])``
SplitDelimiterBehavior.removed
:obj:`Removed`
SplitDelimiterBehavior.isolated
:obj:`Isolated`
SplitDelimiterBehavior.merged_with_previous
:obj:`MergedWithPrevious`
SplitDelimiterBehavior.merged_with_next
:obj:`MergedWithNext`
SplitDelimiterBehavior.contiguous
:obj:`Contiguous`
.. entities:: node
BertNormalizer.clean_text
cleanText
BertNormalizer.handle_chinese_chars
handleChineseChars
BertNormalizer.strip_accents
stripAccents
BertNormalizer.lowercase
lowercase
Normalizer.Sequence
..
PreTokenizer.Sequence
..
SplitDelimiterBehavior.removed
:obj:`removed`
SplitDelimiterBehavior.isolated
:obj:`isolated`
SplitDelimiterBehavior.merged_with_previous
:obj:`mergedWithPrevious`
SplitDelimiterBehavior.merged_with_next
:obj:`mergedWithNext`
SplitDelimiterBehavior.contiguous
:obj:`contiguous`
Normalizers
----------------------------------------------------------------------------------------------------
A ``Normalizer`` is in charge of pre-processing the input string in order to normalize it as
relevant for a given use case. Some common examples of normalization are the Unicode normalization
algorithms (NFD, NFKD, NFC & NFKC), lowercasing, etc.
The specificity of ``tokenizers`` is that we keep track of the alignment while normalizing. This
is essential to allow mapping from the generated tokens back to the input text.
The ``Normalizer`` is optional.
.. list-table::
:header-rows: 1
* - Name
- Description
- Example
* - NFD
- NFD unicode normalization
-
* - NFKD
- NFKD unicode normalization
-
* - NFC
- NFC unicode normalization
-
* - NFKC
- NFKC unicode normalization
-
* - Lowercase
- Replaces all uppercase to lowercase
- Input: ``HELLO ὈΔΥΣΣΕΎΣ``
Output: ``hello ὀδυσσεύς``
* - Strip
- Removes all whitespace characters on the specified sides (left, right or both) of the input
- Input: ``" hi "``
Output: ``"hi"``
* - StripAccents
- Removes all accent symbols in unicode (to be used with NFD for consistency)
- Input: ``é``
Output: ``e``
* - Replace
- Replaces a custom string or regexp with the given content
- ``Replace("a", "e")`` will behave like this:
Input: ``"banana"``
Output: ``"benene"``
* - BertNormalizer
- Provides an implementation of the Normalizer used in the original BERT. Options
that can be set are:
- :entity:`BertNormalizer.clean_text`
- :entity:`BertNormalizer.handle_chinese_chars`
- :entity:`BertNormalizer.strip_accents`
- :entity:`BertNormalizer.lowercase`
-
* - Sequence
- Composes multiple normalizers that will run in the provided order
- :entity:`Normalizer.Sequence`
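As an illustration, composing several normalizers in the Python bindings (a minimal sketch,
not taken from the library's own documentation snippets) looks like this:

.. code-block:: python

    from tokenizers import normalizers
    from tokenizers.normalizers import NFD, StripAccents, Lowercase

    # Decompose, drop the accents, then lowercase, in that order
    normalizer = normalizers.Sequence([NFD(), StripAccents(), Lowercase()])
    print(normalizer.normalize_str("Héllo HOW are ü?"))  # "hello how are u?"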
.. _pre-tokenizers:
Pre tokenizers
----------------------------------------------------------------------------------------------------
The ``PreTokenizer`` takes care of splitting the input according to a set of rules. This
pre-processing lets you ensure that the underlying ``Model`` does not build tokens across multiple
"splits".
For example if you don't want to have whitespaces inside a token, then you can have a
``PreTokenizer`` that splits on these whitespaces.
You can easily combine multiple ``PreTokenizer`` together using a ``Sequence`` (see below).
The ``PreTokenizer`` is also allowed to modify the string, just like a ``Normalizer`` does. This
is necessary to allow some complicated algorithms that require to split before normalizing (e.g.
the ByteLevel)
.. list-table::
:header-rows: 1
* - Name
- Description
- Example
* - ByteLevel
- Splits on whitespaces while remapping all the bytes to a set of visible characters. This
technique has been introduced by OpenAI with GPT-2 and has some more or less nice properties:
- Since it maps on bytes, a tokenizer using this only requires **256** characters as initial
alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode
characters.
- A consequence of the previous point is that it is absolutely unnecessary to have an
unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)
- For non-ASCII characters, it gets completely unreadable, but it works nonetheless!
- Input: ``"Hello my friend, how are you?"``
Output: ``"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"``
* - Whitespace
- Splits on word boundaries (using the following regular expression: ``\w+|[^\w\s]+``)
- Input: ``"Hello there!"``
Output: ``"Hello", "there", "!"``
* - WhitespaceSplit
- Splits on any whitespace character
- Input: ``"Hello there!"``
Output: ``"Hello", "there!"``
* - Punctuation
- Will isolate all punctuation characters
- Input: ``"Hello?"``
Output: ``"Hello", "?"``
* - Metaspace
- Splits on whitespaces and replaces them with a special char "▁" (U+2581)
- Input: ``"Hello there"``
Output: ``"Hello", "▁there"``
* - CharDelimiterSplit
- Splits on a given character
- Example with ``x``:
Input: ``"Helloxthere"``
Output: ``"Hello", "there"``
* - Digits
- Splits the numbers from any other characters.
- Input: ``"Hello123there"``
Output: ```"Hello", "123", "there"```
* - Split
- Versatile pre-tokenizer that splits on provided pattern and according to provided behavior.
The pattern can be inverted if necessary.
- pattern should be either a custom string or regexp.
- behavior should be one of:
* :entity:`SplitDelimiterBehavior.removed`
* :entity:`SplitDelimiterBehavior.isolated`
* :entity:`SplitDelimiterBehavior.merged_with_previous`
* :entity:`SplitDelimiterBehavior.merged_with_next`
* :entity:`SplitDelimiterBehavior.contiguous`
- invert should be a boolean flag.
- Example with `pattern` = :obj:`" "`, `behavior` = :obj:`"isolated"`, `invert` = :obj:`False`:
Input: ``"Hello, how are you?"``
Output: ```"Hello,", " ", "how", " ", "are", " ", "you?"```
* - Sequence
- Lets you compose multiple ``PreTokenizer`` that will be run in the given order
- :entity:`PreTokenizer.Sequence`
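For example, chaining pre-tokenizers in the Python bindings (again a sketch, with an arbitrary
input string):

.. code-block:: python

    from tokenizers import pre_tokenizers
    from tokenizers.pre_tokenizers import Whitespace, Digits

    # First split on word boundaries, then isolate every digit
    pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)])
    print(pre_tokenizer.pre_tokenize_str("Call 911!"))
    # [('Call', (0, 4)), ('9', (5, 6)), ('1', (6, 7)), ('1', (7, 8)), ('!', (8, 9))]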
.. _models:
Models
----------------------------------------------------------------------------------------------------
Models are the core algorithms used to actually tokenize, and therefore, they are the only mandatory
component of a Tokenizer.
.. list-table::
:header-rows: 1
* - Name
- Description
* - WordLevel
- This is the "classic" tokenization algorithm. It let's you simply map words to IDs
without anything fancy. This has the advantage of being really simple to use and
understand, but it requires extremely large vocabularies for a good coverage.
*Using this* ``Model`` *requires the use of a* ``PreTokenizer``. *No choice will be made by
this model directly, it simply maps input tokens to IDs*
* - BPE
- One of the most popular subword tokenization algorithm. The Byte-Pair-Encoding works by
starting with characters, while merging those that are the most frequently seen together,
thus creating new tokens. It then works iteratively to build new tokens out of the most
frequent pairs it sees in a corpus.
BPE is able to build words it has never seen by using multiple subword tokens, and thus
requires smaller vocabularies, with less chances of having "unk" (unknown) tokens.
* - WordPiece
- This is a subword tokenization algorithm quite similar to BPE, used mainly by Google in
models like BERT. It uses a greedy algorithm that tries to build long words first, splitting
into multiple tokens when entire words don't exist in the vocabulary. This is different from
BPE, which starts from characters and builds the biggest tokens it can.
It uses the famous ``##`` prefix to identify tokens that are part of a word (i.e. not starting
a word).
* - Unigram
- Unigram is also a subword tokenization algorithm, and works by trying to identify the best
set of subword tokens to maximize the probability for a given sentence. This is different
from BPE in the way that this is not deterministic based on a set of rules applied
sequentially. Instead Unigram will be able to compute multiple ways of tokenizing, while
choosing the most probable one.
.. _post-processors:
PostProcessor
----------------------------------------------------------------------------------------------------
After the whole pipeline, we sometimes want to insert some special tokens before feeding
a tokenized string into a model, like "[CLS] My horse is amazing [SEP]". The ``PostProcessor``
is the component doing just that.
.. list-table::
:header-rows: 1
* - Name
- Description
- Example
* - TemplateProcessing
- Lets you easily template the post processing, adding special tokens, and specifying
the ``type_id`` for each sequence/special token. The template is given two strings
representing the single sequence and the pair of sequences, as well as a set of
special tokens to use.
- Example, when specifying a template with these values:
- single: ``"[CLS] $A [SEP]"``
- pair: ``"[CLS] $A [SEP] $B [SEP]"``
- special tokens:
- ``"[CLS]"``
- ``"[SEP]"``
Input: ``("I like this", "but not this")``
Output: ``"[CLS] I like this [SEP] but not this [SEP]"``
.. _decoders:
Decoders
----------------------------------------------------------------------------------------------------
The Decoder knows how to go from the IDs used by the Tokenizer, back to a readable piece of text.
Some ``Normalizer`` and ``PreTokenizer`` use special characters or identifiers that need to be
reverted, for example.
.. list-table::
:header-rows: 1
* - Name
- Description
* - ByteLevel
- Reverts the ByteLevel PreTokenizer. This PreTokenizer encodes at the byte-level, using
a set of visible Unicode characters to represent each byte, so we need a Decoder to
revert this process and get something readable again.
* - Metaspace
- Reverts the Metaspace PreTokenizer. This PreTokenizer uses a special identifier ``▁`` to
identify whitespaces, and so this Decoder helps with decoding these.
* - WordPiece
- Reverts the WordPiece Model. This model uses a special identifier ``##`` for continuing
subwords, and so this Decoder helps with decoding these.
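For example, a decoder can be attached as in the following sketch (Python bindings; it assumes network access to fetch the ``bert-base-uncased`` tokenizer from the Hub):
.. code-block:: python

    from tokenizers import Tokenizer, decoders

    tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
    tokenizer.decoder = decoders.WordPiece(prefix="##")

    encoding = tokenizer.encode("tokenization")
    print(encoding.tokens)                 # e.g. ['[CLS]', 'token', '##ization', '[SEP]']
    print(tokenizer.decode(encoding.ids))  # the '##' continuation markers are removed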
| tokenizers/docs/source/components.rst/0 | {
"file_path": "tokenizers/docs/source/components.rst",
"repo_id": "tokenizers",
"token_count": 4223
} |
<p align="center">
<br>
<img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/>
<br>
</p>
<p align="center">
<img alt="Build" src="https://github.com/huggingface/tokenizers/workflows/Rust/badge.svg">
<a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE">
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue">
</a>
<a href="https://docs.rs/tokenizers/">
<img alt="Doc" src="https://docs.rs/tokenizers/badge.svg">
</a>
</p>
<br>
{{readme}} | tokenizers/tokenizers/README.tpl/0 | {
"file_path": "tokenizers/tokenizers/README.tpl",
"repo_id": "tokenizers",
"token_count": 259
} |
pub mod bpe;
pub mod byte_fallback;
pub mod ctc;
pub mod fuse;
pub mod sequence;
pub mod strip;
pub mod wordpiece;
// Re-export these as decoders
pub use super::pre_tokenizers::byte_level;
pub use super::pre_tokenizers::metaspace;
use serde::{Deserialize, Deserializer, Serialize};
use crate::decoders::bpe::BPEDecoder;
use crate::decoders::byte_fallback::ByteFallback;
use crate::decoders::ctc::CTC;
use crate::decoders::fuse::Fuse;
use crate::decoders::sequence::Sequence;
use crate::decoders::strip::Strip;
use crate::decoders::wordpiece::WordPiece;
use crate::normalizers::replace::Replace;
use crate::pre_tokenizers::byte_level::ByteLevel;
use crate::pre_tokenizers::metaspace::Metaspace;
use crate::{Decoder, Result};
#[derive(Serialize, Clone, Debug)]
#[serde(untagged)]
pub enum DecoderWrapper {
BPE(BPEDecoder),
ByteLevel(ByteLevel),
WordPiece(WordPiece),
Metaspace(Metaspace),
CTC(CTC),
Sequence(Sequence),
Replace(Replace),
Fuse(Fuse),
Strip(Strip),
ByteFallback(ByteFallback),
}
impl<'de> Deserialize<'de> for DecoderWrapper {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
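// The serialized form may either carry an explicit `"type"` tag (current format)
// or be a bare legacy object; the tagged variant is tried first below.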
#[derive(Deserialize)]
pub struct Tagged {
#[serde(rename = "type")]
variant: EnumType,
#[serde(flatten)]
rest: serde_json::Value,
}
#[derive(Serialize, Deserialize)]
pub enum EnumType {
BPEDecoder,
ByteLevel,
WordPiece,
Metaspace,
CTC,
Sequence,
Replace,
Fuse,
Strip,
ByteFallback,
}
#[derive(Deserialize)]
#[serde(untagged)]
pub enum DecoderHelper {
Tagged(Tagged),
Legacy(serde_json::Value),
}
#[derive(Deserialize)]
#[serde(untagged)]
pub enum DecoderUntagged {
BPE(BPEDecoder),
ByteLevel(ByteLevel),
WordPiece(WordPiece),
Metaspace(Metaspace),
CTC(CTC),
Sequence(Sequence),
Replace(Replace),
Fuse(Fuse),
Strip(Strip),
ByteFallback(ByteFallback),
}
let helper = DecoderHelper::deserialize(deserializer).expect("Helper");
Ok(match helper {
DecoderHelper::Tagged(model) => {
let mut values: serde_json::Map<String, serde_json::Value> =
serde_json::from_value(model.rest).map_err(serde::de::Error::custom)?;
values.insert(
"type".to_string(),
serde_json::to_value(&model.variant).map_err(serde::de::Error::custom)?,
);
let values = serde_json::Value::Object(values);
match model.variant {
EnumType::BPEDecoder => DecoderWrapper::BPE(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::ByteLevel => DecoderWrapper::ByteLevel(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::WordPiece => DecoderWrapper::WordPiece(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Metaspace => DecoderWrapper::Metaspace(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::CTC => DecoderWrapper::CTC(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Sequence => DecoderWrapper::Sequence(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Replace => DecoderWrapper::Replace(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Fuse => DecoderWrapper::Fuse(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Strip => DecoderWrapper::Strip(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::ByteFallback => DecoderWrapper::ByteFallback(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
}
}
DecoderHelper::Legacy(value) => {
let untagged = serde_json::from_value(value).map_err(serde::de::Error::custom)?;
match untagged {
DecoderUntagged::BPE(dec) => DecoderWrapper::BPE(dec),
DecoderUntagged::ByteLevel(dec) => DecoderWrapper::ByteLevel(dec),
DecoderUntagged::WordPiece(dec) => DecoderWrapper::WordPiece(dec),
DecoderUntagged::Metaspace(dec) => DecoderWrapper::Metaspace(dec),
DecoderUntagged::CTC(dec) => DecoderWrapper::CTC(dec),
DecoderUntagged::Sequence(dec) => DecoderWrapper::Sequence(dec),
DecoderUntagged::Replace(dec) => DecoderWrapper::Replace(dec),
DecoderUntagged::Fuse(dec) => DecoderWrapper::Fuse(dec),
DecoderUntagged::Strip(dec) => DecoderWrapper::Strip(dec),
DecoderUntagged::ByteFallback(dec) => DecoderWrapper::ByteFallback(dec),
}
}
})
}
}
impl Decoder for DecoderWrapper {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
match self {
Self::BPE(bpe) => bpe.decode_chain(tokens),
Self::ByteLevel(bl) => bl.decode_chain(tokens),
Self::Metaspace(ms) => ms.decode_chain(tokens),
Self::WordPiece(wp) => wp.decode_chain(tokens),
Self::CTC(ctc) => ctc.decode_chain(tokens),
Self::Sequence(seq) => seq.decode_chain(tokens),
Self::Replace(seq) => seq.decode_chain(tokens),
Self::ByteFallback(bf) => bf.decode_chain(tokens),
Self::Strip(bf) => bf.decode_chain(tokens),
Self::Fuse(bf) => bf.decode_chain(tokens),
}
}
}
impl_enum_from!(BPEDecoder, DecoderWrapper, BPE);
impl_enum_from!(ByteLevel, DecoderWrapper, ByteLevel);
impl_enum_from!(ByteFallback, DecoderWrapper, ByteFallback);
impl_enum_from!(Fuse, DecoderWrapper, Fuse);
impl_enum_from!(Strip, DecoderWrapper, Strip);
impl_enum_from!(Metaspace, DecoderWrapper, Metaspace);
impl_enum_from!(WordPiece, DecoderWrapper, WordPiece);
impl_enum_from!(CTC, DecoderWrapper, CTC);
impl_enum_from!(Sequence, DecoderWrapper, Sequence);
impl_enum_from!(Replace, DecoderWrapper, Replace);
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn decoder_serialization() {
let oldjson = r#"{"type":"Sequence","decoders":[{"type":"ByteFallback"},{"type":"Metaspace","replacement":"▁","add_prefix_space":true,"prepend_scheme":"always"}]}"#;
let olddecoder: DecoderWrapper = serde_json::from_str(oldjson).unwrap();
let oldserialized = serde_json::to_string(&olddecoder).unwrap();
let json = r#"{"type":"Sequence","decoders":[{"type":"ByteFallback"},{"type":"Metaspace","replacement":"▁","prepend_scheme":"always","split":true}]}"#;
assert_eq!(oldserialized, json);
let decoder: DecoderWrapper = serde_json::from_str(json).unwrap();
let serialized = serde_json::to_string(&decoder).unwrap();
assert_eq!(serialized, json);
}
#[test]
fn decoder_serialization_other_no_arg() {
let json = r#"{"type":"Sequence","decoders":[{"type":"Fuse"},{"type":"Metaspace","replacement":"▁","prepend_scheme":"always","split":true}]}"#;
let decoder: DecoderWrapper = serde_json::from_str(json).unwrap();
let serialized = serde_json::to_string(&decoder).unwrap();
assert_eq!(serialized, json);
}
#[test]
fn decoder_serialization_no_decode() {
let json = r#"{"type":"Sequence","decoders":[{},{"type":"Metaspace","replacement":"▁","prepend_scheme":"always"}]}"#;
let parse = serde_json::from_str::<DecoderWrapper>(json);
match parse {
Err(err) => assert_eq!(
format!("{err}"),
"data did not match any variant of untagged enum DecoderUntagged"
),
_ => panic!("Expected error"),
}
let json = r#"{"replacement":"▁","prepend_scheme":"always"}"#;
let parse = serde_json::from_str::<DecoderWrapper>(json);
match parse {
Err(err) => assert_eq!(
format!("{err}"),
"data did not match any variant of untagged enum DecoderUntagged"
),
_ => panic!("Expected error"),
}
let json = r#"{"type":"Sequence","prepend_scheme":"always"}"#;
let parse = serde_json::from_str::<DecoderWrapper>(json);
match parse {
Err(err) => assert_eq!(format!("{err}"), "missing field `decoders`"),
_ => panic!("Expected error"),
}
}
}
| tokenizers/tokenizers/src/decoders/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/src/decoders/mod.rs",
"repo_id": "tokenizers",
"token_count": 4660
} |
use std::collections::HashMap;
use std::hash::Hash;
#[derive(Default)]
pub struct TrieBuilder<Label> {
trie: Trie<Label>,
}
impl<Label: Eq + Hash + Copy> TrieBuilder<Label> {
pub fn push(&mut self, element: &[Label]) {
self.trie.push(element);
}
pub fn build(self) -> Trie<Label> {
self.trie
}
}
#[derive(Clone)]
pub struct Trie<Label> {
root: Node<Label>,
}
impl<Label: Eq + Hash + Copy> Trie<Label> {
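/// Insert a sequence of labels into the trie, marking its final node as a leaf.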
pub fn push(&mut self, element: &[Label]) {
let mut node = &mut self.root;
for label in element.iter() {
node = node.children.entry(*label).or_default();
}
node.is_leaf = true;
}
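/// Returns an iterator over every prefix of `iterator` that matches a complete entry in the trie.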
pub fn common_prefix_search<T>(&self, iterator: T) -> TrieIterator<Label, T>
where
T: Iterator<Item = Label>,
{
TrieIterator {
node: &self.root,
prefix: vec![],
iterator,
}
}
}
pub struct TrieIterator<'a, Label, T> {
node: &'a Node<Label>,
prefix: Vec<Label>,
iterator: T,
}
impl<Label, T> Iterator for TrieIterator<'_, Label, T>
where
Label: Eq + Hash + Copy,
T: Iterator<Item = Label>,
{
type Item = Vec<Label>;
fn next(&mut self) -> Option<Self::Item> {
loop {
let label = self.iterator.next()?;
self.prefix.push(label);
let child = self.node.children.get(&label)?;
self.node = child;
if self.node.is_leaf {
return Some(self.prefix.clone());
}
}
}
}
impl<Label> Default for Trie<Label> {
fn default() -> Self {
Self {
root: Node::default(),
}
}
}
#[derive(Clone)]
pub struct Node<Label> {
is_leaf: bool,
children: HashMap<Label, Node<Label>>,
}
impl<Label> Default for Node<Label> {
fn default() -> Self {
Self {
is_leaf: false,
children: HashMap::new(),
}
}
}
| tokenizers/tokenizers/src/models/unigram/trie.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/unigram/trie.rs",
"repo_id": "tokenizers",
"token_count": 944
} |
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;
use unicode_categories::UnicodeCategories;
fn is_bert_punc(x: char) -> bool {
char::is_ascii_punctuation(&x) || x.is_punctuation()
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct BertPreTokenizer;
impl PreTokenizer for BertPreTokenizer {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
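// Split on (and drop) whitespace, then isolate every punctuation character.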
pretokenized.split(|_, s| s.split(char::is_whitespace, SplitDelimiterBehavior::Removed))?;
pretokenized.split(|_, s| s.split(is_bert_punc, SplitDelimiterBehavior::Isolated))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{NormalizedString, OffsetReferential, OffsetType};
#[test]
fn basic() {
let pretok = BertPreTokenizer;
let mut pretokenized: PreTokenizedString = "Hey friend! How are you?!?".into();
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey", (0, 3)),
("friend", (4, 10)),
("!", (10, 11)),
("How", (16, 19)),
("are", (20, 23)),
("you", (24, 27)),
("?", (27, 28)),
("!", (28, 29)),
("?", (29, 30)),
]
);
}
#[test]
fn chinese_chars() {
let mut n = NormalizedString::from("野口里佳 Noguchi Rika");
n.transform(
n.get().to_owned().chars().flat_map(|c| {
if (c as usize) > 0x4E00 {
vec![(' ', 0), (c, 1), (' ', 1)]
} else {
vec![(c, 0)]
}
}),
0,
);
let mut pretokenized = n.into();
let pretok = BertPreTokenizer;
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("é", (0, 3)),
("å£", (3, 6)),
("é", (6, 9)),
("䜳", (9, 12)),
("Noguchi", (13, 20)),
("Rika", (21, 25))
]
);
}
}
| tokenizers/tokenizers/src/pre_tokenizers/bert.rs/0 | {
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/bert.rs",
"repo_id": "tokenizers",
"token_count": 1460
} |
use crate::processors::PostProcessorWrapper;
use crate::tokenizer::{Encoding, PostProcessor, Result};
use crate::utils::macro_rules_attribute;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Sequence {
processors: Vec<PostProcessorWrapper>,
}
impl Sequence {
pub fn new(processors: Vec<PostProcessorWrapper>) -> Self {
Self { processors }
}
pub fn get(&self, index: usize) -> Option<&PostProcessorWrapper> {
self.processors.get(index)
}
pub fn get_mut(&mut self, index: usize) -> Option<&mut PostProcessorWrapper> {
self.processors.get_mut(index)
}
pub fn set_mut(&mut self, index: usize, post_proc: PostProcessorWrapper) {
self.processors[index] = post_proc;
}
}
impl AsRef<[PostProcessorWrapper]> for Sequence {
fn as_ref(&self) -> &[PostProcessorWrapper] {
&self.processors
}
}
impl AsMut<[PostProcessorWrapper]> for Sequence {
fn as_mut(&mut self) -> &mut [PostProcessorWrapper] {
&mut self.processors
}
}
impl IntoIterator for Sequence {
type Item = PostProcessorWrapper;
type IntoIter = std::vec::IntoIter<Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.processors.into_iter()
}
}
impl PostProcessor for Sequence {
fn added_tokens(&self, is_pair: bool) -> usize {
self.processors
.iter()
.map(|p| p.added_tokens(is_pair))
.sum::<usize>()
}
fn process_encodings(
&self,
mut encodings: Vec<Encoding>,
add_special_tokens: bool,
) -> Result<Vec<Encoding>> {
for processor in &self.processors {
encodings = processor.process_encodings(encodings, add_special_tokens)?;
}
Ok(encodings)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::processors::{ByteLevel, PostProcessorWrapper};
use crate::tokenizer::{Encoding, PostProcessor};
use std::collections::HashMap;
use std::iter::FromIterator;
#[test]
fn process_chain() {
let start = Encoding::new(
vec![0; 5],
vec![0; 5],
vec![
"Ä ".into(),
"Ä Ä Ä Ä HelloÄ Ä ".into(),
"Ä Ä Hello".into(),
"HelloÄ Ä ".into(),
"Ä Ä Ä Ä ".into(),
],
vec![],
vec![(0, 1), (0, 11), (11, 18), (18, 25), (25, 29)],
vec![],
vec![],
vec![],
HashMap::new(),
);
let bytelevel = ByteLevel::default().trim_offsets(true);
let sequence = Sequence::new(vec![PostProcessorWrapper::ByteLevel(bytelevel)]);
let expected = Encoding::new(
vec![0; 5],
vec![0; 5],
vec![
"Ä ".into(),
"Ä Ä Ä Ä HelloÄ Ä ".into(),
"Ä Ä Hello".into(),
"HelloÄ Ä ".into(),
"Ä Ä Ä Ä ".into(),
],
vec![],
vec![(0, 0), (4, 9), (13, 18), (18, 23), (29, 29)],
vec![],
vec![],
vec![],
HashMap::from_iter(vec![(0, 0..5)]),
);
assert_eq!(
expected,
bytelevel.process(start.clone(), None, false).unwrap()
);
assert_eq!(
expected,
sequence.process(start.clone(), None, false).unwrap()
);
let pair_expected = Encoding::new(
vec![0; 10],
vec![0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
vec![
"Ä ".into(),
"Ä Ä Ä Ä HelloÄ Ä ".into(),
"Ä Ä Hello".into(),
"HelloÄ Ä ".into(),
"Ä Ä Ä Ä ".into(),
"Ä ".into(),
"Ä Ä Ä Ä HelloÄ Ä ".into(),
"Ä Ä Hello".into(),
"HelloÄ Ä ".into(),
"Ä Ä Ä Ä ".into(),
],
vec![],
vec![
(0, 0),
(4, 9),
(13, 18),
(18, 23),
(29, 29),
(0, 0),
(4, 9),
(13, 18),
(18, 23),
(29, 29),
],
vec![],
vec![],
vec![],
HashMap::from_iter(vec![(0, 0..5), (1, 5..10)]),
);
assert_eq!(
pair_expected,
bytelevel
.process(start.clone(), Some(start.clone()), false)
.unwrap()
);
assert_eq!(
pair_expected,
sequence.process(start.clone(), Some(start), false).unwrap()
);
}
}
| tokenizers/tokenizers/src/processors/sequence.rs/0 | {
"file_path": "tokenizers/tokenizers/src/processors/sequence.rs",
"repo_id": "tokenizers",
"token_count": 2672
} |
//!
//! This module defines helpers to allow optional Rayon usage.
//!
use rayon::iter::IterBridge;
use rayon::prelude::*;
use rayon_cond::CondIterator;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicU8;
use std::sync::atomic::Ordering;
// Re-export rayon current_num_threads
pub use rayon::current_num_threads;
pub const ENV_VARIABLE: &str = "TOKENIZERS_PARALLELISM";
static USED_PARALLELISM: AtomicBool = AtomicBool::new(false);
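// Tri-state override set via `set_parallelism`: 0 => unset, 1 => disabled, 2 => enabled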
static PARALLELISM: AtomicU8 = AtomicU8::new(0);
/// Check if the TOKENIZERS_PARALLELISM env variable has been explicitly set
pub fn is_parallelism_configured() -> bool {
std::env::var(ENV_VARIABLE).is_ok() || get_override_parallelism().is_some()
}
/// Check if at some point we used a parallel iterator
pub fn has_parallelism_been_used() -> bool {
USED_PARALLELISM.load(Ordering::SeqCst)
}
/// Get internally set parallelism
fn get_override_parallelism() -> Option<bool> {
match PARALLELISM.load(Ordering::SeqCst) {
0 => None,
1 => Some(false),
2 => Some(true),
_ => unreachable!(),
}
}
/// Get the currently set value for `TOKENIZERS_PARALLELISM` env variable
fn get_env_parallelism() -> bool {
match std::env::var(ENV_VARIABLE) {
Ok(mut v) => {
v.make_ascii_lowercase();
!matches!(v.as_ref(), "" | "off" | "false" | "f" | "no" | "n" | "0")
}
Err(_) => true, // If we couldn't get the variable, we use the default
}
}
pub fn get_parallelism() -> bool {
if let Some(parallel) = get_override_parallelism() {
parallel
} else {
get_env_parallelism()
}
}
/// Set the value for `TOKENIZERS_PARALLELISM` for the current process
pub fn set_parallelism(val: bool) {
PARALLELISM.store(if val { 2 } else { 1 }, Ordering::SeqCst);
}
/// Allows converting into an iterator that can be executed either in parallel or serially.
///
/// The choice is made according to the currently set `TOKENIZERS_PARALLELISM` environment variable.
/// This variable can have one of the following values
/// - False => "" (empty value), "false", "f", "off", "no", "n", "0"
/// - True => Any other value
///
pub trait MaybeParallelIterator<P, S>
where
P: ParallelIterator,
S: Iterator<Item = P::Item>,
{
/// Convert ourselves into a CondIterator that will be executed either in parallel or serially,
/// based solely on the `TOKENIZERS_PARALLELISM` environment variable
fn into_maybe_par_iter(self) -> CondIterator<P, S>;
/// Convert ourselves into a CondIterator that will be executed either in parallel or serially,
/// based on both the `TOKENIZERS_PARALLELISM` environment variable and the provided bool.
/// Both must be true to run with parallelism activated.
fn into_maybe_par_iter_cond(self, cond: bool) -> CondIterator<P, S>;
}
impl<P, S, I> MaybeParallelIterator<P, S> for I
where
I: IntoParallelIterator<Iter = P, Item = P::Item> + IntoIterator<IntoIter = S, Item = S::Item>,
P: ParallelIterator,
S: Iterator<Item = P::Item>,
{
fn into_maybe_par_iter(self) -> CondIterator<P, S> {
let parallelism = get_parallelism();
if parallelism {
USED_PARALLELISM.store(true, Ordering::SeqCst);
}
CondIterator::new(self, parallelism)
}
fn into_maybe_par_iter_cond(self, cond: bool) -> CondIterator<P, S> {
if cond {
self.into_maybe_par_iter()
} else {
CondIterator::from_serial(self)
}
}
}
/// Shared reference version of MaybeParallelIterator, works the same but returns an iterator
/// over references, does not consume self
pub trait MaybeParallelRefIterator<'data, P, S>
where
P: ParallelIterator,
S: Iterator<Item = P::Item>,
P::Item: 'data,
{
fn maybe_par_iter(&'data self) -> CondIterator<P, S>;
fn maybe_par_iter_cond(&'data self, cond: bool) -> CondIterator<P, S>;
}
impl<'data, P, S, I: 'data + ?Sized> MaybeParallelRefIterator<'data, P, S> for I
where
&'data I: MaybeParallelIterator<P, S>,
P: ParallelIterator,
S: Iterator<Item = P::Item>,
P::Item: 'data,
{
fn maybe_par_iter(&'data self) -> CondIterator<P, S> {
self.into_maybe_par_iter()
}
fn maybe_par_iter_cond(&'data self, cond: bool) -> CondIterator<P, S> {
self.into_maybe_par_iter_cond(cond)
}
}
/// Exclusive reference version of MaybeParallelIterator, works the same but returns an iterator
/// over mutable references, does not consume self
pub trait MaybeParallelRefMutIterator<'data, P, S>
where
P: ParallelIterator,
S: Iterator<Item = P::Item>,
P::Item: 'data,
{
fn maybe_par_iter_mut(&'data mut self) -> CondIterator<P, S>;
fn maybe_par_iter_mut_cond(&'data mut self, cond: bool) -> CondIterator<P, S>;
}
impl<'data, P, S, I: 'data + ?Sized> MaybeParallelRefMutIterator<'data, P, S> for I
where
&'data mut I: MaybeParallelIterator<P, S>,
P: ParallelIterator,
S: Iterator<Item = P::Item>,
P::Item: 'data,
{
fn maybe_par_iter_mut(&'data mut self) -> CondIterator<P, S> {
self.into_maybe_par_iter()
}
fn maybe_par_iter_mut_cond(&'data mut self, cond: bool) -> CondIterator<P, S> {
self.into_maybe_par_iter_cond(cond)
}
}
/// Converts any serial iterator into a CondIterator that can run either in parallel or serially.
pub trait MaybeParallelBridge<T, S>
where
S: Iterator<Item = T> + Send,
T: Send,
{
fn maybe_par_bridge(self) -> CondIterator<IterBridge<S>, S>;
fn maybe_par_bridge_cond(self, cond: bool) -> CondIterator<IterBridge<S>, S>;
}
impl<T, S> MaybeParallelBridge<T, S> for S
where
S: Iterator<Item = T> + Send,
T: Send,
{
fn maybe_par_bridge(self) -> CondIterator<IterBridge<S>, S> {
let iter = CondIterator::from_serial(self);
if get_parallelism() {
USED_PARALLELISM.store(true, Ordering::SeqCst);
CondIterator::from_parallel(iter.into_parallel().right().unwrap())
} else {
iter
}
}
fn maybe_par_bridge_cond(self, cond: bool) -> CondIterator<IterBridge<S>, S> {
if cond {
self.maybe_par_bridge()
} else {
CondIterator::from_serial(self)
}
}
}
/// Allows converting into `chunks` that can be processed either in parallel or serially.
pub trait MaybeParallelSlice<'data, T>
where
T: Sync,
{
/// Create a CondIterator, that will be executed either in parallel or serially,
/// based solely on the `TOKENIZERS_PARALLELISM` environment variable
fn maybe_par_chunks(
&'_ self,
chunk_size: usize,
) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>>;
/// Create a CondIterator, that will be executed either in parallel or serially,
/// based on both the `TOKENIZERS_PARALLELISM` environment variable and the provided bool.
/// Both must be true to run with parallelism activated.
fn maybe_par_chunks_cond(
&'_ self,
cond: bool,
chunk_size: usize,
) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>>;
}
impl<T> MaybeParallelSlice<'_, T> for [T]
where
T: Sync,
{
fn maybe_par_chunks(
&'_ self,
chunk_size: usize,
) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>> {
let parallelism = get_parallelism();
if parallelism {
CondIterator::from_parallel(self.par_chunks(chunk_size))
} else {
CondIterator::from_serial(self.chunks(chunk_size))
}
}
fn maybe_par_chunks_cond(
&'_ self,
cond: bool,
chunk_size: usize,
) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>> {
if cond {
self.maybe_par_chunks(chunk_size)
} else {
CondIterator::from_serial(self.chunks(chunk_size))
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_maybe_parallel_iterator() {
let mut v = vec![1u32, 2, 3, 4, 5, 6];
assert_eq!(v.maybe_par_iter().sum::<u32>(), 21);
assert_eq!(
v.maybe_par_iter_mut()
.map(|v| {
*v *= 2;
*v
})
.sum::<u32>(),
42
);
assert_eq!(v.maybe_par_iter().sum::<u32>(), 42);
assert_eq!(v.into_maybe_par_iter().sum::<u32>(), 42);
}
#[test]
fn test_maybe_parallel_slice() {
let v = [1, 2, 3, 4, 5];
let chunks: Vec<_> = v.maybe_par_chunks(2).collect();
assert_eq!(chunks, vec![&[1, 2][..], &[3, 4], &[5]]);
}
}
| tokenizers/tokenizers/src/utils/parallelism.rs/0 | {
"file_path": "tokenizers/tokenizers/src/utils/parallelism.rs",
"repo_id": "tokenizers",
"token_count": 3698
} |
To install via [NPM](https://www.npmjs.com/package/@huggingface/transformers), run:
```bash
npm i @huggingface/transformers
```
Alternatively, you can use it in vanilla JS, without any bundler, by using a CDN or static hosting. For example, using [ES Modules](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Modules), you can import the library with:
```html
<script type="module">
import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/[email protected]';
</script>
```
| transformers.js/docs/snippets/2_installation.snippet/0 | {
"file_path": "transformers.js/docs/snippets/2_installation.snippet",
"repo_id": "transformers.js",
"token_count": 176
} |
import Chart from 'chart.js/auto';
import Prism from 'prismjs';
// Import code and styles for supported languages
import 'prismjs/components/prism-javascript';
import 'prismjs/components/prism-python';
import 'prismjs/components/prism-markdown';
import 'prismjs/components/prism-clike';
import 'prismjs/themes/prism.css'
import './theme.css';
import './style.css';
// Initialise worker
const worker = new Worker(new URL('./worker.js', import.meta.url), {
type: 'module',
});
// Define elements
const TASK_SELECTOR = document.getElementById('task');
let searchParams = new URLSearchParams(location.search);
let defaultDemo = searchParams.get('demo');
if (defaultDemo) {
TASK_SELECTOR.value = defaultDemo;
}
// translation inputs
const LANGUAGE_FROM = document.getElementById('language-from');
const LANGUAGE_TO = document.getElementById('language-to');
const INPUT_TEXTBOX = document.getElementById('input-textbox');
const OUTPUT_TEXTBOX = document.getElementById('output-textbox');
// text generation inputs
const TEXT_GENERATION_TEXTBOX = document.getElementById('text-generation-textbox');
const TASKS = document.getElementsByClassName('task-settings')
const PROGRESS = document.getElementById('progress');
const PROGRESS_BARS = document.getElementById('progress-bars');
const GENERATE_BUTTON = document.getElementById('generate');
const MLM_INPUT_TEXTBOX = document.getElementById('mlm-input-textbox');
const MLM_OUTPUT_TEXTBOX = document.getElementById('mlm-output-textbox');
const SC_INPUT_TEXTBOX = document.getElementById('sc-input-textbox');
const SC_OUTPUT_CANVAS = document.getElementById('sc-canvas');
const TC_INPUT_TEXTBOX = document.getElementById('tc-input-textbox');
const TC_OUTPUT = document.getElementById('tc-output');
const QA_CONTEXT_TEXTBOX = document.getElementById('qa-context-textbox');
const QA_QUESTION_TEXTBOX = document.getElementById('qa-question-textbox');
const QA_ANSWER_TEXTBOX = document.getElementById('qa-answer-textbox');
const SUMMARIZATION_INPUT_TEXTBOX = document.getElementById('summarization-input-textbox');
const SUMMARIZATION_OUTPUT_TEXTBOX = document.getElementById('summarization-output-textbox');
const SPEECH2TEXT_SELECT = document.getElementById('audio-select');
const SPEECH2TEXT_INPUT = document.getElementById('audio-file');
const SPEECH2TEXT_AUDIO = document.getElementById('audio-player');
const SPEECH2TEXT_OUTPUT_TEXTBOX = document.getElementById('speech2text-output-textbox');
const TEXT2IMAGE_SELECT = document.getElementById('image-select');
const TEXT2IMAGE_INPUT = document.getElementById('image-file');
const TEXT2IMAGE_IMG = document.getElementById('image-viewer');
const TEXT2IMAGE_OUTPUT_TEXTBOX = document.getElementById('image2text-output-textbox');
const IMAGE_CLASSIFICATION_SELECT = document.getElementById('ic-select');
const IMAGE_CLASSIFICATION_INPUT = document.getElementById('ic-file');
const IMAGE_CLASSIFICATION_IMG = document.getElementById('ic-viewer');
const IMAGE_CLASSIFICATION_OUTPUT_CANVAS = document.getElementById('ic-canvas');
const CODE_COMPLETION_CONTAINER = document.getElementById('code-completion-container');
const ZSIC_SELECT = document.getElementById('zsic-select');
const ZSIC_INPUT = document.getElementById('zsic-file');
const ZSIC_CLASSES = document.getElementById('zsic-classes');
const ZSIC_IMG = document.getElementById('zsic-viewer');
const ZSIC_OUTPUT_CANVAS = document.getElementById('zsic-canvas');
const OD_SELECT = document.getElementById('od-select');
const OD_INPUT = document.getElementById('od-file');
const OD_IMG = document.getElementById('od-viewer');
const OD_OUTPUT_OVERLAY = document.getElementById('od-overlay');
const OD_OUTPUT_CANVAS = document.getElementById('od-canvas');
const ZSC_INPUT_TEXTBOX = document.getElementById('zsc-input-textbox');
const ZSC_CLASSES = document.getElementById('zsc-classes');
const ZSC_OUTPUT_CANVAS = document.getElementById('zsc-canvas');
const DEFAULT_GREEDY_PARAMS = {
max_new_tokens: 50,
num_beams: 1,
temperature: 1,
top_k: 0,
do_sample: false
}
const TASK_DEFAULT_PARAMS = {
'translation': DEFAULT_GREEDY_PARAMS,
'text-generation': {
max_new_tokens: 100,
num_beams: 1,
temperature: 1,
top_k: 20,
do_sample: true
},
'code-completion': DEFAULT_GREEDY_PARAMS,
'masked-language-modelling': {
topk: 5 // number of samples
},
'sequence-classification': {},
'token-classification': {},
'zero-shot-classification': {
multi_label: false
},
'question-answering': {},
'summarization': {
max_new_tokens: 50,
num_beams: 2,
temperature: 1,
top_k: 0,
do_sample: false
},
'automatic-speech-recognition': DEFAULT_GREEDY_PARAMS,
'image-to-text': DEFAULT_GREEDY_PARAMS,
'image-classification': {},
'zero-shot-image-classification': {},
'object-detection': {},
};
[
[SPEECH2TEXT_SELECT, SPEECH2TEXT_INPUT, SPEECH2TEXT_AUDIO],
[TEXT2IMAGE_SELECT, TEXT2IMAGE_INPUT, TEXT2IMAGE_IMG],
[IMAGE_CLASSIFICATION_SELECT, IMAGE_CLASSIFICATION_INPUT, IMAGE_CLASSIFICATION_IMG],
[ZSIC_SELECT, ZSIC_INPUT, ZSIC_IMG],
[OD_SELECT, OD_INPUT, OD_IMG],
].forEach(x => {
let [select, input, media] = x;
select.addEventListener('input', (e) => {
if (select.options[select.selectedIndex].hasAttribute('show-custom')) {
input.style.display = 'block';
} else {
input.style.display = 'none';
media.src = select.value
}
})
input.addEventListener("change", () => {
const file = input.files[0];
const url = URL.createObjectURL(file);
media.src = url;
});
});
const NER_TAGS = {
// tag: [textColour, backgroundColour, tagColour]
'ORG': ['#115E59', '#CCFBF1', '#14B8A6'],
'PER': ['#9D174D', '#FCE7F3', '#EC4899'],
'LOC': ['#86198F', '#FAE8FF', '#D946EF'],
}
// Predefined list of unique colours
const COLOURS = [
'255, 99, 132',
'54, 162, 235',
'255, 206, 86',
'75, 192, 192',
'153, 102, 255',
'255, 159, 64',
]
OD_SELECT.addEventListener('change', () => {
// Clear overlay and chart data on change
OD_OUTPUT_OVERLAY.innerHTML = '';
const chart = CHARTS[OD_OUTPUT_CANVAS.id];
chart.data = structuredClone(DEFAULT_DATA);
chart.update();
});
OD_OUTPUT_OVERLAY.addEventListener('mousemove', (e) => {
let rects = OD_OUTPUT_OVERLAY.querySelectorAll('rect')
let colours = [];
let borderColours = [];
rects.forEach((rect, i) => {
let colour = COLOURS[i % COLOURS.length];
// Display if hovering over background (tagName === 'svg')
let toDisplay = e.target.tagName !== 'rect';
if (!toDisplay) {
// Perform additional check
let bb = rect.getBoundingClientRect()
// Check if box intersects with current mouse positition
toDisplay = e.clientX >= bb.left && e.clientX <= bb.right && e.clientY >= bb.top && e.clientY <= bb.bottom
}
if (toDisplay) {
// Set back to original
rect.style.fillOpacity = 0.1;
rect.style.opacity = 1;
colours.push(`rgba(${colour}, 0.5)`);
borderColours.push(`rgba(${colour}, 1)`);
} else {
// Hovering over a rect, so set all other rects to 0 opacity
rect.style.fillOpacity = 0;
rect.style.opacity = 0;
colours.push(`rgba(${colour}, 0.05)`);
borderColours.push(`rgba(${colour}, 0.5)`);
}
})
const chart = CHARTS['od-canvas'];
chart.data.datasets[0].backgroundColor = colours;
chart.data.datasets[0].borderColor = borderColours;
chart.update();
})
function updateParams(task) {
let params = TASK_DEFAULT_PARAMS[task]
if (!params) return;
for (let [key, value] of Object.entries(params)) {
let element = document.querySelector(`.generation-option[param-name="${key}"]`)
if (!element) continue;
element.value = value;
}
}
// Parameters
const GENERATION_OPTIONS = document.getElementsByClassName('generation-option');
const CHART_OPTIONS = {
responsive: true,
maintainAspectRatio: false,
indexAxis: 'y',
scales: {
y: {
beginAtZero: true,
},
x: {
min: 0,
max: 1,
}
},
plugins: {
legend: {
display: false
},
},
layout: {
padding: {
bottom: -5,
}
},
};
// Initialise all code blocks
const CODE_BLOCKS = {};
[...document.querySelectorAll('.code-container')].forEach(element => {
// Guide to add editable code block:
// https://codepen.io/WebCoder49/pen/dyNyraq
// https://css-tricks.com/creating-an-editable-textarea-that-supports-syntax-highlighted-code/
const CODE_HIGHLIGHT = element.querySelector('pre');
const CODE_HIGHLIGHT_CONTENT = element.querySelector('code');
const CODE_COMPLETION_TEXTBOX = element.querySelector('textarea');
let sync_scroll = () => {
/* Scroll result to scroll coords of event - sync with textarea */
CODE_HIGHLIGHT.scrollTop = CODE_COMPLETION_TEXTBOX.scrollTop;
CODE_HIGHLIGHT.scrollLeft = CODE_COMPLETION_TEXTBOX.scrollLeft;
}
let update = (text) => {
// Handle final newlines (see article)
if (text[text.length - 1] == "\n") {
text += " ";
}
// Update code
CODE_HIGHLIGHT_CONTENT.innerHTML = escapeHtml(text);
// Syntax Highlight
Prism.highlightElement(CODE_HIGHLIGHT_CONTENT);
}
// Update code function
let updateCode = (text) => {
update(text);
sync_scroll();
};
CODE_BLOCKS[element.id] = {
update: (text) => {
CODE_COMPLETION_TEXTBOX.value = text;
updateCode(text);
// When updating, set scroll to bottom
// https://stackoverflow.com/a/9170709
CODE_COMPLETION_TEXTBOX.scrollTop = CODE_COMPLETION_TEXTBOX.scrollHeight;
},
text: () => CODE_COMPLETION_TEXTBOX.value
};
CODE_COMPLETION_TEXTBOX.oninput = () => updateCode(CODE_COMPLETION_TEXTBOX.value);
CODE_COMPLETION_TEXTBOX.onscroll = sync_scroll;
CODE_COMPLETION_TEXTBOX.onkeydown = (event) => {
let code = CODE_COMPLETION_TEXTBOX.value;
if (event.key == "Tab") {
/* Tab key pressed */
event.preventDefault(); // stop normal
let before_tab = code.slice(0, CODE_COMPLETION_TEXTBOX.selectionStart); // text before tab
let after_tab = code.slice(CODE_COMPLETION_TEXTBOX.selectionEnd, CODE_COMPLETION_TEXTBOX.value.length); // text after tab
let cursor_pos = CODE_COMPLETION_TEXTBOX.selectionStart + 1; // where cursor moves after tab - moving forward by 1 char to after tab
CODE_COMPLETION_TEXTBOX.value = before_tab + "\t" + after_tab; // add tab char
// move cursor
CODE_COMPLETION_TEXTBOX.selectionStart = cursor_pos;
CODE_COMPLETION_TEXTBOX.selectionEnd = cursor_pos;
update(CODE_COMPLETION_TEXTBOX.value); // Update text to include indent
}
};
});
const DEFAULT_DATA = {
labels: ['label', 'label', 'label', 'label', 'label'],
datasets: [{
borderWidth: 1
}]
}
const CHARTS = {
'sc-canvas': new Chart(SC_OUTPUT_CANVAS, {
type: 'bar',
data: {
labels: ['5 stars', '4 stars', '3 stars', '2 stars', '1 star'],
datasets: [{
borderWidth: 1
}]
},
options: CHART_OPTIONS,
}),
'ic-canvas': new Chart(IMAGE_CLASSIFICATION_OUTPUT_CANVAS, {
type: 'bar',
data: structuredClone(DEFAULT_DATA),
options: CHART_OPTIONS
}),
'zsic-canvas': new Chart(ZSIC_OUTPUT_CANVAS, {
type: 'bar',
data: {
labels: ['football', 'airport', 'animals'],
datasets: [{
borderWidth: 1
}]
},
options: CHART_OPTIONS
}),
'od-canvas': new Chart(OD_OUTPUT_CANVAS, {
type: 'bar',
data: structuredClone(DEFAULT_DATA),
options: CHART_OPTIONS
}),
'zsc-canvas': new Chart(ZSC_OUTPUT_CANVAS, {
type: 'bar',
data: {
labels: ['urgent', 'not urgent', 'phone', 'tablet', 'microwave'],
datasets: [{
borderWidth: 1
}]
},
options: CHART_OPTIONS
}),
};
[
[ZSIC_CLASSES, ZSIC_OUTPUT_CANVAS],
[ZSC_CLASSES, ZSC_OUTPUT_CANVAS],
].forEach(x => {
let [input, chart] = x;
input.addEventListener('input', () => {
// Update labels of graph
let chartToUpdate = CHARTS[chart.id];
chartToUpdate.data.labels = getZSClasses(input);
chartToUpdate.data.datasets[0].data = new Array(chartToUpdate.data.labels.length).fill(0);
chartToUpdate.update();
})
});
function getZSClasses(elem) {
// Get zero-shot classes from input element
return elem.value.split(/\s*,+\s*/g).filter(x => x);
}
function updateVisibility() {
// Set default parameters for task
updateParams(TASK_SELECTOR.value);
for (let element of TASKS) {
if (element.getAttribute('task').split(',').includes(TASK_SELECTOR.value)) {
element.style.display = 'block';
} else {
element.style.display = 'none';
}
}
}
updateVisibility();
// Add event listeners
TASK_SELECTOR.addEventListener('input', updateVisibility);
function parseValue(value, type) {
switch (type) {
case 'number':
return Number(value);
case 'bool':
return value === 'true'
default:
return value
}
}
function isVisible(e) {
// https://stackoverflow.com/a/38873788
return !!(e.offsetWidth || e.offsetHeight || e.getClientRects().length);
}
GENERATE_BUTTON.addEventListener('click', async (e) => {
// Set and pass generation settings to web worker
let data = {
task: TASK_SELECTOR.value,
generation: Object.fromEntries([...GENERATION_OPTIONS]
.filter(isVisible) // Only use parameters that are visible on screen
.map(x => {
let value = parseValue(x.value, x.getAttribute('datatype'));
return [x.getAttribute('param-name'), value]
}))
};
switch (TASK_SELECTOR.value) {
case 'translation':
data.languageFrom = LANGUAGE_FROM.value
data.languageTo = LANGUAGE_TO.value
data.text = INPUT_TEXTBOX.value
data.elementIdToUpdate = OUTPUT_TEXTBOX.id
break;
case 'text-generation':
data.text = TEXT_GENERATION_TEXTBOX.value
data.elementIdToUpdate = TEXT_GENERATION_TEXTBOX.id
break;
case 'code-completion':
data.text = CODE_BLOCKS[CODE_COMPLETION_CONTAINER.id].text();
data.elementIdToUpdate = CODE_COMPLETION_CONTAINER.id
data.targetType = 'code'
break;
case 'masked-language-modelling':
data.text = MLM_INPUT_TEXTBOX.value
data.elementIdToUpdate = MLM_OUTPUT_TEXTBOX.id
break;
case 'sequence-classification':
data.text = SC_INPUT_TEXTBOX.value
data.elementIdToUpdate = SC_OUTPUT_CANVAS.id
data.targetType = 'chart'
break;
case 'token-classification':
data.text = TC_INPUT_TEXTBOX.value
data.elementIdToUpdate = TC_OUTPUT.id
data.targetType = 'tokens'
break;
case 'zero-shot-classification':
data.text = ZSC_INPUT_TEXTBOX.value
data.classes = getZSClasses(ZSC_CLASSES);
data.elementIdToUpdate = ZSC_OUTPUT_CANVAS.id
data.targetType = 'chart'
data.updateLabels = true
break;
case 'question-answering':
data.context = QA_CONTEXT_TEXTBOX.value
data.question = QA_QUESTION_TEXTBOX.value
data.elementIdToUpdate = QA_ANSWER_TEXTBOX.id
break;
case 'summarization':
data.text = SUMMARIZATION_INPUT_TEXTBOX.value
data.elementIdToUpdate = SUMMARIZATION_OUTPUT_TEXTBOX.id
break;
case 'automatic-speech-recognition':
const sampling_rate = 16000;
const audioCTX = new AudioContext({ sampleRate: sampling_rate })
const response = await (await fetch(SPEECH2TEXT_AUDIO.currentSrc)).arrayBuffer()
const decoded = await audioCTX.decodeAudioData(response)
data.audio = decoded.getChannelData(0);
data.elementIdToUpdate = SPEECH2TEXT_OUTPUT_TEXTBOX.id
break;
case 'image-to-text':
data.image = getImageDataFromImage(TEXT2IMAGE_IMG)
data.elementIdToUpdate = TEXT2IMAGE_OUTPUT_TEXTBOX.id
break;
case 'image-classification':
data.image = getImageDataFromImage(IMAGE_CLASSIFICATION_IMG)
data.elementIdToUpdate = IMAGE_CLASSIFICATION_OUTPUT_CANVAS.id
data.targetType = 'chart'
data.updateLabels = true
break;
case 'zero-shot-image-classification':
data.image = getImageDataFromImage(ZSIC_IMG)
data.classes = getZSClasses(ZSIC_CLASSES);
data.elementIdToUpdate = ZSIC_OUTPUT_CANVAS.id
data.targetType = 'chart'
data.updateLabels = true
break;
case 'object-detection':
data.image = getImageDataFromImage(OD_IMG)
data.targetType = 'overlay'
data.chartId = OD_OUTPUT_CANVAS.id
data.elementIdToUpdate = OD_OUTPUT_OVERLAY.id
break;
default:
return;
}
worker.postMessage(data);
});
// Handle result returned by the web worker
worker.addEventListener('message', (event) => {
const message = event.data;
switch (message.type) {
case 'download': // for session creation
if (message.data.status === 'initiate') {
PROGRESS.style.display = 'block';
// create progress bar
PROGRESS_BARS.appendChild(htmlToElement(`
<div class="progress w-100" model="${message.data.name}" file="${message.data.file}">
<div class="progress-bar" role="progressbar"></div>
</div>
`));
} else {
let bar = PROGRESS_BARS.querySelector(`.progress[model="${message.data.name}"][file="${message.data.file}"]> .progress-bar`)
switch (message.data.status) {
case 'progress':
// update existing bar
bar.style.width = message.data.progress.toFixed(2) + '%';
bar.textContent = `${message.data.file} (${formatBytes(message.data.loaded)} / ${formatBytes(message.data.total)})`;
break;
case 'done':
// Remove the progress bar
bar.parentElement.remove();
break;
case 'ready':
// Pipeline is ready - hide container
PROGRESS.style.display = 'none';
PROGRESS_BARS.innerHTML = '';
break;
}
}
break;
case 'update': // for generation
let target = message.target;
let elem = document.getElementById(target);
switch (message.targetType) {
case 'code':
CODE_BLOCKS[target].update(message.data);
break;
default: // is textbox
elem.value = message.data
break;
}
break;
case 'complete':
switch (message.targetType) {
case 'chart':
const chartToUpdate = CHARTS[message.target];
let chartData = chartToUpdate.data.datasets[0].data;
if (message.updateLabels) {
for (let i = 0; i < message.data.length; ++i) {
let item = message.data[i];
chartData[i] = item.score;
chartToUpdate.data.labels[i] = item.label;
}
} else {
// set data, ensuring labels align correctly
for (let item of message.data) {
chartData[
chartToUpdate.data.labels.indexOf(item.label)
] = item.score
}
}
chartToUpdate.update(); // update the chart
break;
case 'tokens':
let target = document.getElementById(message.target);
target.innerHTML = '';
let tokens = message.data;
for (let token of tokens) {
let elem;
if (token.type === 'O') {
elem = document.createTextNode(token.text);
} else {
let [textColour, backgroundColour, tagColour] = NER_TAGS[token.type];
elem = htmlToElement(`<span class="ner-container" style="background-color: ${backgroundColour}; color: ${textColour};">${token.text}<span class="ner-tag" style="background-color: ${tagColour}; color: ${backgroundColour};">${token.type}</span></span>`);
}
target.appendChild(elem);
}
break;
case 'overlay':
let parent = document.getElementById(message.target);
// Clear previous output, just in case
parent.innerHTML = '';
let viewbox = parent.viewBox.baseVal;
let colours = [];
let borderColours = [];
let items = message.data;
for (let i = 0; i < items.length; ++i) {
const box = items[i].box;
let svgns = "http://www.w3.org/2000/svg";
let rect = document.createElementNS(svgns, 'rect');
rect.setAttribute('x', viewbox.width * box.xmin);
rect.setAttribute('y', viewbox.height * box.ymin);
rect.setAttribute('width', viewbox.width * (box.xmax - box.xmin));
rect.setAttribute('height', viewbox.height * (box.ymax - box.ymin));
const colour = COLOURS[i % COLOURS.length];
rect.style.stroke = rect.style.fill = `rgba(${colour}, 1)`;
colours.push(`rgba(${colour}, 0.5)`);
borderColours.push(`rgba(${colour}, 1)`);
parent.appendChild(rect);
}
// Update chart label and data
const chart = CHARTS[message.chartId];
chart.data.labels = items.map(x => x.label);
chart.data.datasets[0] = {
data: items.map(x => x.score),
backgroundColor: colours,
borderColor: borderColours
};
chart.update()
break;
default: // is text
document.getElementById(message.target).value = message.data
break;
}
break;
default:
break;
}
});
// Utility functions
function escapeHtml(unsafe) {
return unsafe.replaceAll('&', '&amp;').replaceAll('<', '&lt;').replaceAll('>', '&gt;').replaceAll('"', '&quot;').replaceAll("'", '&#039;');
}
function htmlToElement(html) {
// https://stackoverflow.com/a/35385518
let template = document.createElement('template');
html = html.trim(); // Never return a text node of whitespace as the result
template.innerHTML = html;
return template.content.firstChild;
}
function formatBytes(bytes, decimals = 0) {
const sizes = ["Bytes", "KB", "MB", "GB", "TB"];
if (bytes === 0) return "0 Bytes";
const i = parseInt(Math.floor(Math.log(bytes) / Math.log(1000)), 10);
const rounded = (bytes / Math.pow(1000, i)).toFixed(decimals);
return rounded + " " + sizes[i];
}
function getImageDataFromImage(original) {
// Helper function to get image data from image element
const canvas = document.createElement('canvas');
canvas.width = original.naturalWidth;
canvas.height = original.naturalHeight;
const ctx = canvas.getContext('2d');
// TODO play around with ctx options?
// ctx.patternQuality = 'bilinear';
// ctx.quality = 'bilinear';
// ctx.antialias = 'default';
// ctx.imageSmoothingQuality = 'high';
ctx.drawImage(original, 0, 0, canvas.width, canvas.height);
return canvas.toDataURL();
}
| transformers.js/examples/demo-site/src/main.js/0 | {
"file_path": "transformers.js/examples/demo-site/src/main.js",
"repo_id": "transformers.js",
"token_count": 9224
} |
{
"name": "electron",
"productName": "electron",
"version": "1.0.0",
"description": "Transformers.js sample Electron application",
"main": "src/index.js",
"scripts": {
"start": "electron-forge start",
"package": "electron-forge package",
"make": "electron-forge make",
"publish": "electron-forge publish",
"lint": "echo \"No linting configured\""
},
"keywords": [],
"author": "Xenova",
"license": "MIT",
"dependencies": {
"@xenova/transformers": "^2.6.2",
"electron-squirrel-startup": "^1.0.0"
},
"devDependencies": {
"@electron-forge/cli": "^6.1.1",
"@electron-forge/maker-deb": "^6.1.1",
"@electron-forge/maker-rpm": "^6.1.1",
"@electron-forge/maker-squirrel": "^6.1.1",
"@electron-forge/maker-zip": "^6.1.1",
"electron": "^24.1.1"
}
}
| transformers.js/examples/electron/package.json/0 | {
"file_path": "transformers.js/examples/electron/package.json",
"repo_id": "transformers.js",
"token_count": 361
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Transformers.js | Sample Browser Extension</title>
<!-- Load styles -->
<link rel="stylesheet" href="popup.css" />
</head>
<body>
<div class="container">
<h1>Transformers.js</h1>
<h2>Run 🤗 Transformers in a Browser Extension!</h2>
<input id="text" placeholder="Enter text here">
<pre id="output"></pre>
</div>
</body>
</html> | transformers.js/examples/extension/src/popup.html/0 | {
"file_path": "transformers.js/examples/extension/src/popup.html",
"repo_id": "transformers.js",
"token_count": 246
} |
import { pipeline } from '@xenova/transformers';
import wavefile from 'wavefile';
// Load model
let transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en');
// Load audio data
let url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav';
let buffer = Buffer.from(await fetch(url).then(x => x.arrayBuffer()))
// Read .wav file and convert it to required format
let wav = new wavefile.WaveFile(buffer);
wav.toBitDepth('32f'); // Pipeline expects input as a Float32Array
wav.toSampleRate(16000); // Whisper expects audio with a sampling rate of 16000
let audioData = wav.getSamples();
if (Array.isArray(audioData)) {
if (audioData.length > 1) {
const SCALING_FACTOR = Math.sqrt(2);
// Merge channels (into first channel to save memory)
for (let i = 0; i < audioData[0].length; ++i) {
audioData[0][i] = SCALING_FACTOR * (audioData[0][i] + audioData[1][i]) / 2;
}
}
// Select first channel
audioData = audioData[0];
}
// Run model
let start = performance.now();
let output = await transcriber(audioData);
let end = performance.now();
console.log(`Execution duration: ${(end - start) / 1000} seconds`);
console.log(output);
// { text: ' And so my fellow Americans ask not what your country can do for you, ask what you can do for your country.' }
| transformers.js/examples/node-audio-processing/index.js/0 | {
"file_path": "transformers.js/examples/node-audio-processing/index.js",
"repo_id": "transformers.js",
"token_count": 479
} |
import { pipeline } from '@xenova/transformers';
/**
* This class uses the Singleton pattern to ensure that only one instance of the
* pipeline is loaded. This is because loading the pipeline is an expensive
* operation and we don't want to do it every time we want to translate a sentence.
*/
class MyTranslationPipeline {
static task = 'translation';
static model = 'Xenova/nllb-200-distilled-600M';
static instance = null;
static async getInstance(progress_callback = null) {
if (this.instance === null) {
this.instance = pipeline(this.task, this.model, { progress_callback });
}
return this.instance;
}
}
// Listen for messages from the main thread
self.addEventListener('message', async (event) => {
// Retrieve the translation pipeline. When called for the first time,
// this will load the pipeline and save it for future use.
let translator = await MyTranslationPipeline.getInstance(x => {
// We also add a progress callback to the pipeline so that we can
// track model loading.
self.postMessage(x);
});
// Actually perform the translation
let output = await translator(event.data.text, {
tgt_lang: event.data.tgt_lang,
src_lang: event.data.src_lang,
// Allows for partial output
callback_function: x => {
self.postMessage({
status: 'update',
output: translator.tokenizer.decode(x[0].output_token_ids, { skip_special_tokens: true })
});
}
});
// Send the output back to the main thread
self.postMessage({
status: 'complete',
output: output,
});
});
| transformers.js/examples/react-translator/src/worker.js/0 | {
"file_path": "transformers.js/examples/react-translator/src/worker.js",
"repo_id": "transformers.js",
"token_count": 614
} |
# Semantic Image Search
This example shows you how to use Transformers.js to create a semantic image search engine. Check out the demo [here](https://huggingface.co/spaces/Xenova/semantic-image-search).

## Getting Started
### Dataset
This application uses images from [The Unsplash Dataset](https://github.com/unsplash/datasets), which you can download [here](https://unsplash.com/data/lite/latest). All you need for this demo is the `photos.tsv000` TSV file, which contains the metadata for all the images.
### Connecting to Supabase
After creating a new [Supabase](https://supabase.com/) project, you'll need to:
1. Create an `images` table and import the data from `photos.tsv000`.
2. Add a column for `image_embeddings`:
```sql
-- Add a new vector column with a dimension of 512
alter table images add column image_embedding vector(512);
```
3. Add your `SUPABASE_URL`, `SUPABASE_ANON_KEY`, and `SUPABASE_SECRET_KEY` keys to a `.env.local` file (see `.env.local.example` for template).
4. Update the image embeddings in your database by running the following command:
```bash
SUPABASE_URL=your-project-url \
SUPABASE_SECRET_KEY=your-secret-key \
node scripts/update-database.mjs
```
*Note:* This will take a while. Also, since queries are capped at 1000 returned rows, you'll need to run this command multiple times to insert all 25000 rows.
5. Create a new `match_images` [database function](https://supabase.com/docs/guides/database/functions):
```sql
-- https://supabase.com/blog/openai-embeddings-postgres-vector
create or replace function match_images (
query_embedding vector(512),
match_threshold float,
match_count int
)
returns table (
photo_id text,
photo_url text,
photo_image_url text,
photo_width int,
photo_height int,
photo_aspect_ratio float,
photo_description text,
ai_description text,
blur_hash text,
similarity float
)
language sql stable
as $$
select
photo_id,
photo_url,
photo_image_url,
photo_width,
photo_height,
photo_aspect_ratio,
photo_description,
ai_description,
blur_hash,
1 - (image_embedding <=> query_embedding) as similarity
from images
where 1 - (image_embedding <=> query_embedding) > match_threshold
order by similarity desc
limit match_count;
$$;
```
6. Add a [database policy](https://supabase.com/docs/guides/auth/row-level-security#policies) to allow users to view the database:
```sql
create policy "policy_name"
on public.images
for select using (
true
);
```
### Development
You can now run the development server with:
```bash
npm run dev
```
Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
| transformers.js/examples/semantic-image-search/README.md/0 | {
"file_path": "transformers.js/examples/semantic-image-search/README.md",
"repo_id": "transformers.js",
"token_count": 1129
} |
'use client'
import { useState } from 'react'
import { Modal } from './components/Modal';
import { SearchBar } from './components/SearchBar';
import { ImageGrid } from './components/ImageGrid';
export default function Home() {
// Application state
const [images, setImages] = useState(null);
const [currentImage, setCurrentImage] = useState(null);
const search = async (text) => {
if (!text) return;
const params = new URLSearchParams();
params.append('text', text);
params.append('threshold', 0.1);
params.append('limit', 100);
// Make a request to the /classify route on the server.
const result = await fetch(`/search?${params.toString()}`);
const json = await result.json();
setImages(json);
};
return (
<main className="mx-auto max-w-[1960px] p-4 relative">
<Modal currentImage={currentImage} setCurrentImage={setCurrentImage} />
<SearchBar search={search} />
<ImageGrid images={images} setCurrentImage={setCurrentImage} />
</main>
)
}
| transformers.js/examples/semantic-image-search/src/app/page.js/0 | {
"file_path": "transformers.js/examples/semantic-image-search/src/app/page.js",
"repo_id": "transformers.js",
"token_count": 345
} |
// Although not strictly necessary, we delegate the tokenization to a worker thread to avoid
// any potential issues with the tokenizer blocking the main thread (especially for large inputs).
import { env, AutoTokenizer } from '@xenova/transformers'
env.allowLocalModels = false;
// This is a map of all the tokenizer instances that we have loaded.
// model_id -> promise that resolves to tokenizer
const TOKENIZER_MAPPINGS = new Map();
// Listen for messages from the main thread
self.addEventListener('message', async (event) => {
let tokenizerPromise = TOKENIZER_MAPPINGS.get(event.data.model_id);
// Load the tokenizer if it hasn't been loaded yet
if (!tokenizerPromise) {
tokenizerPromise = AutoTokenizer.from_pretrained(event.data.model_id);
TOKENIZER_MAPPINGS.set(event.data.model_id, new Promise((resolve) => {
// Just for visualization purposes, we may need to modify the tokenizer slightly
tokenizerPromise.then((tokenizer) => {
// NOTE: We just remove the StripDecoder from the llama tokenizer
switch (tokenizer.constructor.name) {
case 'LlamaTokenizer':
case 'Grok1Tokenizer':
// tokenizer.decoder.decoders.at(-1).constructor.name === 'StripDecoder'
tokenizer.decoder.decoders.pop();
break;
case 'T5Tokenizer':
tokenizer.decoder.addPrefixSpace = false;
break;
}
resolve(tokenizer);
});
}));
}
const tokenizer = await tokenizerPromise;
const text = event.data.text;
const start = performance.now();
const token_ids = tokenizer.encode(text);
const end = performance.now();
console.log('[INFO]', `Tokenized ${text.length} characters in ${(end - start).toFixed(2)}ms`)
let decoded = token_ids.map(x => tokenizer.decode([x]));
let margins = [];
// Minor post-processing for visualization purposes
switch (tokenizer.constructor.name) {
case 'BertTokenizer':
margins = decoded.map((x, i) => i === 0 || x.startsWith('##') ? 0 : 8);
decoded = decoded.map(x => x.replace('##', ''));
break;
case 'T5Tokenizer':
if (decoded.length > 0 && decoded[0] !== ' ') {
decoded[0] = decoded[0].replace(/^ /, '');
}
break;
}
// Send the output back to the main thread
self.postMessage({
token_ids, decoded, margins
});
}); | transformers.js/examples/tokenizer-playground/src/worker.js/0 | {
"file_path": "transformers.js/examples/tokenizer-playground/src/worker.js",
"repo_id": "transformers.js",
"token_count": 1112
} |
import './style.css';
import { env, AutoModel, AutoProcessor, RawImage } from '@xenova/transformers';
env.backends.onnx.wasm.wasmPaths = 'https://cdn.jsdelivr.net/npm/[email protected]/dist/';
env.backends.onnx.wasm.numThreads = 1;
// Reference the elements that we will need
const status = document.getElementById('status');
const container = document.getElementById('container');
const canvas = document.getElementById('canvas');
const outputCanvas = document.getElementById('output-canvas');
const video = document.getElementById('video');
const sizeSlider = document.getElementById('size');
const sizeLabel = document.getElementById('size-value');
const scaleSlider = document.getElementById('scale');
const scaleLabel = document.getElementById('scale-value');
function setStreamSize(width, height) {
video.width = outputCanvas.width = canvas.width = Math.round(width);
video.height = outputCanvas.height = canvas.height = Math.round(height);
}
status.textContent = 'Loading model...';
// Load model and processor
const model_id = 'Xenova/modnet';
let model;
try {
model = await AutoModel.from_pretrained(model_id, {
device: 'webgpu',
dtype: 'fp32', // TODO: add fp16 support
});
} catch (err) {
status.textContent = err.message;
alert(err.message)
throw err;
}
const processor = await AutoProcessor.from_pretrained(model_id);
// Set up controls
let size = 256;
processor.feature_extractor.size = { shortest_edge: size };
sizeSlider.addEventListener('input', () => {
size = Number(sizeSlider.value);
processor.feature_extractor.size = { shortest_edge: size };
sizeLabel.textContent = size;
});
sizeSlider.disabled = false;
let scale = 0.5;
scaleSlider.addEventListener('input', () => {
scale = Number(scaleSlider.value);
setStreamSize(video.videoWidth * scale, video.videoHeight * scale);
scaleLabel.textContent = scale;
});
scaleSlider.disabled = false;
status.textContent = 'Ready';
let isProcessing = false;
let previousTime;
const context = canvas.getContext('2d', { willReadFrequently: true });
const outputContext = outputCanvas.getContext('2d', { willReadFrequently: true });
function updateCanvas() {
const { width, height } = canvas;
if (!isProcessing) {
isProcessing = true;
(async function () {
// Read the current frame from the video
context.drawImage(video, 0, 0, width, height);
const currentFrame = context.getImageData(0, 0, width, height);
const image = new RawImage(currentFrame.data, width, height, 4);
// Pre-process image
const inputs = await processor(image);
// Predict alpha matte
const { output } = await model({ input: inputs.pixel_values });
const mask = await RawImage.fromTensor(output[0].mul(255).to('uint8')).resize(width, height);
// Update alpha channel
const outPixelData = currentFrame;
for (let i = 0; i < mask.data.length; ++i) {
outPixelData.data[4 * i + 3] = mask.data[i];
}
outputContext.putImageData(outPixelData, 0, 0);
if (previousTime !== undefined) {
const fps = 1000 / (performance.now() - previousTime);
status.textContent = `FPS: ${fps.toFixed(2)}`;
}
previousTime = performance.now();
isProcessing = false;
})();
}
window.requestAnimationFrame(updateCanvas);
}
// Start the video stream
navigator.mediaDevices.getUserMedia(
{ video: true }, // Ask for video
).then((stream) => {
// Set up the video and canvas elements.
video.srcObject = stream;
video.play();
const videoTrack = stream.getVideoTracks()[0];
const { width, height } = videoTrack.getSettings();
setStreamSize(width * scale, height * scale);
// Set container width and height depending on the image aspect ratio
const ar = width / height;
const [cw, ch] = (ar > 720 / 405) ? [720, 720 / ar] : [405 * ar, 405];
container.style.width = `${cw}px`;
container.style.height = `${ch}px`;
// Start the animation loop
setTimeout(updateCanvas, 50);
}).catch((error) => {
alert(error);
});
| transformers.js/examples/webgpu-video-background-removal/main.js/0 | {
"file_path": "transformers.js/examples/webgpu-video-background-removal/main.js",
"repo_id": "transformers.js",
"token_count": 1573
} |
import { FEATURE_EXTRACTOR_NAME } from "../utils/constants.js";
import { Callable } from "../utils/generic.js";
import { getModelJSON } from "../utils/hub.js";
/**
* Base class for feature extractors.
*/
export class FeatureExtractor extends Callable {
/**
* Constructs a new FeatureExtractor instance.
*
* @param {Object} config The configuration for the feature extractor.
*/
constructor(config) {
super();
this.config = config
}
/**
* Instantiate one of the feature extractor classes of the library from a pretrained model.
*
* The feature extractor class to instantiate is selected based on the `feature_extractor_type` property of
* the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible)
*
* @param {string} pretrained_model_name_or_path The name or path of the pretrained model. Can be either:
* - A string, the *model id* of a pretrained feature_extractor hosted inside a model repo on huggingface.co.
* Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
* user or organization name, like `dbmdz/bert-base-german-cased`.
* - A path to a *directory* containing feature_extractor files, e.g., `./my_model_directory/`.
* @param {import('../utils/hub.js').PretrainedOptions} options Additional options for loading the feature_extractor.
*
* @returns {Promise<FeatureExtractor>} A new instance of the Feature Extractor class.
*/
static async from_pretrained(pretrained_model_name_or_path, options) {
const config = await getModelJSON(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, true, options);
return new this(config);
}
}
/**
* Helper function to validate audio inputs.
* @param {any} audio The audio data.
* @param {string} feature_extractor The name of the feature extractor.
* @private
*/
export function validate_audio_inputs(audio, feature_extractor) {
if (!(audio instanceof Float32Array || audio instanceof Float64Array)) {
throw new Error(
`${feature_extractor} expects input to be a Float32Array or a Float64Array, but got ${audio?.constructor?.name ?? typeof audio} instead. ` +
`If using the feature extractor directly, remember to use \`read_audio(url, sampling_rate)\` to obtain the raw audio data of the file/url.`
)
}
}
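// Editor's sketch (not part of the original module, and not exported): a minimal custom
// feature extractor built on the base class above. The class name and the shape of the
// returned object are hypothetical; real subclasses (e.g. MoonshineFeatureExtractor)
// follow the same `_call` pattern and are usually instantiated via `from_pretrained`.
class IdentityFeatureExtractor extends FeatureExtractor {
    /**
     * @param {Float32Array|Float64Array} audio Raw mono audio samples.
     * @returns {Promise<{ input_values: number[] }>} The audio as a plain array.
     */
    async _call(audio) {
        validate_audio_inputs(audio, 'IdentityFeatureExtractor');
        return { input_values: Array.from(audio) };
    }
}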
| transformers.js/src/base/feature_extraction_utils.js/0 | {
"file_path": "transformers.js/src/base/feature_extraction_utils.js",
"repo_id": "transformers.js",
"token_count": 822
} |
import {
ImageProcessor,
} from "../../base/image_processors_utils.js";
import { cat, full, interpolate_4d, slice, stack } from "../../utils/tensor.js";
export class Idefics3ImageProcessor extends ImageProcessor {
constructor(config) {
super(config);
this.do_image_splitting = config.do_image_splitting ?? true;
this.max_image_size = config.max_image_size;
}
/**
* @typedef {import('../../utils/image.js').RawImage} RawImage
* @typedef {import('../../utils/tensor.js').Tensor} Tensor
*/
/**
* Calculate size to resize images to, to be multiples of `vision_encoder_max_size` while preserving the aspect ratio.
* @param {Tensor} pixel_values Tensor of the image to resize.
* @param {number} vision_encoder_max_size Maximum size of the output image. If the image is larger than this size,
* it will be split into patches of this size, and the original image will be concatenated with the patches, resized to max_size.
*/
get_resize_for_vision_encoder(pixel_values, vision_encoder_max_size) {
let [height, width] = pixel_values.dims.slice(-2);
const aspect_ratio = width / height;
if (width >= height) {
width = Math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size;
height = Math.floor(width / aspect_ratio);
height = Math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size;
} else {
height = Math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size;
width = Math.floor(height * aspect_ratio);
width = Math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size;
}
return { height, width };
}
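    // Worked example (editor's note): for a 200x500 (height x width) input with
    // vision_encoder_max_size = 364, the width branch gives
    //   width  = ceil(500 / 364) * 364 = 728
    //   height = ceil(floor(728 / 2.5) / 364) * 364 = ceil(291 / 364) * 364 = 364
    // i.e. { height: 364, width: 728 }, which later splits into a 1x2 grid of patches.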
/** @param {RawImage|RawImage[]|RawImage[][]} images */
async _call(images, {
do_image_splitting = null,
return_row_col_info = false,
} = {}) {
/** @type {RawImage[][]} */
let batched_2d_images;
if (!Array.isArray(images)) {
batched_2d_images = [[images]];
} else {
if (images.length === 0 || !images[0]) {
throw new Error("No images provided.");
}
if (!Array.isArray(images[0])) {
batched_2d_images = [/** @type {RawImage[]} */(images)];
} else {
batched_2d_images = /** @type {RawImage[][]} */(images);
}
}
// List of tensors, each with shape [patches, channels, height, width]
let all_pixel_values = [];
let images_list_rows = [];
let images_list_cols = [];
const original_sizes = [];
const reshaped_input_sizes = [];
for (const image_batch of batched_2d_images) {
let images_list = await Promise.all(image_batch.map(x => this.preprocess(x)));
// Original sizes of images
original_sizes.push(...images_list.map(x => x.original_size));
// Reshaped sizes of images, before padding or cropping
reshaped_input_sizes.push(...images_list.map(x => x.reshaped_input_size));
// Convert images to 4D tensors for easier processing
images_list.forEach(x => x.pixel_values.unsqueeze_(0));
const { longest_edge } = this.max_image_size;
/** @type {Tensor[]} */
let images_tensor;
if (do_image_splitting ?? this.do_image_splitting) {
let image_rows = new Array(images_list.length);
let image_cols = new Array(images_list.length);
// We first resize both height and width of each image to the nearest max_image_size multiple, disregarding the aspect ratio
images_tensor = await Promise.all(images_list.map(async (x, i) => {
const new_size = this.get_resize_for_vision_encoder(x.pixel_values, longest_edge);
const resized = await interpolate_4d(x.pixel_values, {
size: [new_size.height, new_size.width],
});
const { frames, num_splits_h, num_splits_w } = await this.split_image(resized, this.max_image_size);
image_rows[i] = num_splits_h;
image_cols[i] = num_splits_w;
return cat(frames, 0);
}));
images_list_rows.push(image_rows);
images_list_cols.push(image_cols);
} else {
/** @type {[number, number]} */
const size = [longest_edge, longest_edge];
images_tensor = await Promise.all(
images_list.map(x => interpolate_4d(x.pixel_values, { size }))
);
images_list_rows.push(new Array(images_list.length).fill(0));
images_list_cols.push(new Array(images_list.length).fill(0));
}
all_pixel_values.push(cat(images_tensor, 0));
}
const batch_size = all_pixel_values.length;
const [n, c, h, w] = all_pixel_values[0].dims;
// Stack pixel values
let pixel_values;
let pixel_attention_mask;
if (batch_size === 1) {
pixel_values = all_pixel_values[0].unsqueeze_(0);
pixel_attention_mask = full([batch_size, n, h, w], true);
} else {
// Add padding (if necessary) to images with less patches than the maximum number of patches
const max_num_patches = Math.max(...all_pixel_values.map(x => x.dims.at(0)));
pixel_attention_mask = full([batch_size, max_num_patches, h, w], true);
const pixel_attention_mask_data = pixel_attention_mask.data;
const pixel_attention_mask_stride = max_num_patches * h * w;
for (let i = 0; i < batch_size; ++i) {
const num_patches = all_pixel_values[i].dims[0];
if (num_patches < max_num_patches) {
all_pixel_values[i] = cat([
all_pixel_values[i],
full([max_num_patches - num_patches, c, h, w], 0),
], 0);
const start_offset = i * pixel_attention_mask_stride + num_patches * h * w;
const end_offset = (i + 1) * pixel_attention_mask_stride;
// @ts-expect-error
pixel_attention_mask_data.fill(false, start_offset, end_offset);
}
}
pixel_values = stack(all_pixel_values, 0);
}
return {
pixel_values,
pixel_attention_mask,
original_sizes,
reshaped_input_sizes,
...(
return_row_col_info
? { rows: images_list_rows, cols: images_list_cols }
: {}
),
}
}
async split_image(pixel_values, { longest_edge }) {
const max_height = longest_edge;
const max_width = longest_edge;
const frames = [];
const [height, width] = pixel_values.dims.slice(-2);
let num_splits_h = 0, num_splits_w = 0;
if (height > max_height || width > max_width) {
// Calculate the number of splits
num_splits_h = Math.ceil(height / max_height);
num_splits_w = Math.ceil(width / max_width);
// Calculate the optimal width and height for the sub-images
const optimal_height = Math.ceil(height / num_splits_h);
const optimal_width = Math.ceil(width / num_splits_w);
// Iterate through each row and column
for (let r = 0; r < num_splits_h; ++r) {
for (let c = 0; c < num_splits_w; ++c) {
let start_x, start_y, end_x, end_y;
if (r === num_splits_h - 1) { // At bottom
start_y = height - optimal_height;
end_y = height;
} else {
start_y = r * optimal_height;
end_y = (r + 1) * optimal_height;
}
if (c === num_splits_w - 1) { // At right
start_x = width - optimal_width;
end_x = width;
} else {
start_x = c * optimal_width;
end_x = (c + 1) * optimal_width;
}
const starts = [start_y, start_x];
const ends = [end_y, end_x];
const patch = await slice(pixel_values, starts, ends, [2, 3]);
frames.push(patch);
}
}
// Resize the global image to match max dimensions for memory efficiency
const global_image_height = max_height;
const global_image_width = max_width;
if (height !== global_image_height || width !== global_image_width) {
pixel_values = await interpolate_4d(pixel_values, {
size: [global_image_height, global_image_width],
})
}
}
frames.push(pixel_values);
return { frames, num_splits_h, num_splits_w };
}
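    // Worked example (editor's note): continuing the 364x728 case above with
    // longest_edge = 364, num_splits_h = ceil(364 / 364) = 1 and num_splits_w = ceil(728 / 364) = 2,
    // so two 364x364 crops are produced, followed by the 364x364 resized global image,
    // giving 3 frames in total.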
}
| transformers.js/src/models/idefics3/image_processing_idefics3.js/0 | {
"file_path": "transformers.js/src/models/idefics3/image_processing_idefics3.js",
"repo_id": "transformers.js",
"token_count": 4606
} |
import { FeatureExtractor, validate_audio_inputs } from '../../base/feature_extraction_utils.js';
import { Tensor } from '../../utils/tensor.js';
export class MoonshineFeatureExtractor extends FeatureExtractor {
/**
* Asynchronously extracts input values from a given audio using the provided configuration.
* @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array.
* @returns {Promise<{ input_values: Tensor; }>} The extracted input values.
*/
async _call(audio) {
validate_audio_inputs(audio, 'MoonshineFeatureExtractor');
if (audio instanceof Float64Array) {
audio = new Float32Array(audio);
}
const shape = [
1, /* batch_size */
audio.length, /* num_samples */
];
return {
input_values: new Tensor('float32', audio, shape),
};
}
}
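// Editor's usage sketch (illustrative, not invoked here): the extractor simply wraps raw
// mono audio in a [1, num_samples] float32 tensor. In practice the instance would be
// obtained via `MoonshineFeatureExtractor.from_pretrained(...)`; an empty config is used
// below only to keep the example self-contained.
async function exampleMoonshineFeatureExtraction() {
    const extractor = new MoonshineFeatureExtractor({});
    const audio = new Float32Array(16000); // 1 second of silence at 16 kHz
    const { input_values } = await extractor._call(audio);
    // input_values.dims -> [1, 16000]
    return input_values;
}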
| transformers.js/src/models/moonshine/feature_extraction_moonshine.js/0 | {
"file_path": "transformers.js/src/models/moonshine/feature_extraction_moonshine.js",
"repo_id": "transformers.js",
"token_count": 364
} |
import {
ImageProcessor,
} from "../../base/image_processors_utils.js";
import { calculateDimensions } from "../../utils/core.js";
import {
interpolate_4d,
Tensor,
} from "../../utils/tensor.js";
/**
* @typedef {object} SamImageProcessorResult
* @property {Tensor} pixel_values
* @property {import("../../base/image_processors_utils.js").HeightWidth[]} original_sizes
* @property {import("../../base/image_processors_utils.js").HeightWidth[]} reshaped_input_sizes
* @property {Tensor} [input_points]
* @property {Tensor} [input_labels]
* @property {Tensor} [input_boxes]
*/
export class SamImageProcessor extends ImageProcessor {
/**
*
* @param {any} input_points
* @param {import("../../base/image_processors_utils.js").HeightWidth[]} original_sizes
* @param {import("../../base/image_processors_utils.js").HeightWidth[]} reshaped_input_sizes
* @returns {Tensor}
*/
reshape_input_points(input_points, original_sizes, reshaped_input_sizes, is_bounding_box = false) {
// Make deep copy to avoid altering user's input
input_points = structuredClone(input_points);
let shape = calculateDimensions(input_points);
// TODO: add support for 2D input_points
if (shape.length === 3) {
// Correct user's input
if (!is_bounding_box) {
shape = [1, ...shape];
}
input_points = [input_points];
} else if (shape.length !== 4) {
throw Error("The input_points must be a 4D tensor of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.")
}
// Reshape input points
for (let i = 0; i < input_points.length; ++i) { // batch_size
let originalImageSize = original_sizes[i];
let reshapedImageSize = reshaped_input_sizes[i];
let resizeFactors = [
reshapedImageSize[0] / originalImageSize[0],
reshapedImageSize[1] / originalImageSize[1]
]
for (let j = 0; j < input_points[i].length; ++j) { // point_batch_size
for (let k = 0; k < input_points[i][j].length; ++k) { // nb_points_per_image
for (let w = 0; w < input_points[i][j][k].length; ++w) { // 2 or 4
input_points[i][j][k][w] *= resizeFactors[w % 2];
}
}
}
}
return new Tensor(
'float32',
Float32Array.from(input_points.flat(Infinity)),
shape
)
}
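    // Worked example (editor's note): with original_sizes = [[480, 640]] and
    // reshaped_input_sizes = [[1024, 1024]], resizeFactors is [1024 / 480, 1024 / 640],
    // so each point [p0, p1] is rescaled to [p0 * 1024 / 480, p1 * 1024 / 640] before
    // being packed into a float32 tensor of the computed 4D shape.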
/**
*
* @param {any} input_labels
* @param {Tensor} input_points
* @returns {Tensor}
*/
add_input_labels(input_labels, input_points) {
let shape = calculateDimensions(input_labels);
if (shape.length === 2) {
// Correct user's input
shape = [1, ...shape];
input_labels = [input_labels];
} else if (shape.length !== 3) {
throw Error("The input_points must be a 4D tensor of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.")
}
if (shape.some((x, i) => x !== input_points.dims[i])) {
throw Error(`The first ${shape.length} dimensions of 'input_points' and 'input_labels' must be the same.`)
}
return new Tensor(
'int64',
input_labels.flat(Infinity).map(BigInt),
shape,
)
}
/**
* @param {any[]} images The URL(s) of the image(s) to extract features from.
* @param {Object} [options] Additional options for the processor.
* @param {any} [options.input_points=null] A 3D or 4D array, representing the input points provided by the user.
* - 3D: `[point_batch_size, nb_points_per_image, 2]`. In this case, `batch_size` is assumed to be 1.
* - 4D: `[batch_size, point_batch_size, nb_points_per_image, 2]`.
* @param {any} [options.input_labels=null] A 2D or 3D array, representing the input labels for the points, used by the prompt encoder to encode the prompt.
* - 2D: `[point_batch_size, nb_points_per_image]`. In this case, `batch_size` is assumed to be 1.
* - 3D: `[batch_size, point_batch_size, nb_points_per_image]`.
* @param {number[][][]} [options.input_boxes=null] A 3D array of shape `(batch_size, num_boxes, 4)`, representing the input boxes provided by the user.
     * This is used by the prompt encoder to encode the prompt. Generally yields much better generated masks.
     * The processor will generate a tensor, with each dimension corresponding respectively to the image batch size,
     * the number of boxes per image and the coordinates of the top left and bottom right point of the box.
* In the order (`x1`, `y1`, `x2`, `y2`):
* - `x1`: the x coordinate of the top left point of the input box
* - `y1`: the y coordinate of the top left point of the input box
* - `x2`: the x coordinate of the bottom right point of the input box
* - `y2`: the y coordinate of the bottom right point of the input box
* @returns {Promise<SamImageProcessorResult>}
*/
async _call(images, {
input_points = null,
input_labels = null,
input_boxes = null
} = {}) {
// TODO allow user to use preprocessed images
/** @type {SamImageProcessorResult} */
const processed = await super._call(images);
if (input_points) {
processed.input_points = this.reshape_input_points(
input_points, processed.original_sizes, processed.reshaped_input_sizes
);
}
if (input_labels) {
if (!processed.input_points) {
throw Error("`input_points` must be provided if `input_labels` are provided.")
}
processed.input_labels = this.add_input_labels(input_labels, processed.input_points);
}
if (input_boxes) {
processed.input_boxes = this.reshape_input_points(
input_boxes, processed.original_sizes, processed.reshaped_input_sizes, true,
);
}
return processed;
}
/**
* Remove padding and upscale masks to the original image size.
* @param {Tensor} masks Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
* @param {[number, number][]} original_sizes The original sizes of each image before it was resized to the model's expected input shape, in (height, width) format.
* @param {[number, number][]} reshaped_input_sizes The size of each image as it is fed to the model, in (height, width) format. Used to remove padding.
* @param {Object} options Optional parameters for post-processing.
* @param {number} [options.mask_threshold] The threshold to use for binarizing the masks.
* @param {boolean} [options.binarize] Whether to binarize the masks.
* @param {Object} [options.pad_size] The target size the images were padded to before being passed to the model. If `null`, the target size is assumed to be the processor's `pad_size`.
* @param {number} [options.pad_size.height] The height the images were padded to.
* @param {number} [options.pad_size.width] The width the images were padded to.
     * @returns {Promise<Tensor[]>} Batched masks in (batch_size, num_channels, height, width) format, where (height, width) is given by original_size.
*/
async post_process_masks(masks, original_sizes, reshaped_input_sizes, {
mask_threshold = 0.0,
binarize = true,
pad_size = null,
} = {}) {
// masks: [1, 1, 3, 256, 256]
const output_masks = [];
pad_size = pad_size ?? this.pad_size;
/** @type {[number, number]} */
const target_image_size = [pad_size.height, pad_size.width];
for (let i = 0; i < original_sizes.length; ++i) {
const original_size = original_sizes[i];
const reshaped_input_size = reshaped_input_sizes[i];
// Upscale mask to padded size
let interpolated_mask = (await interpolate_4d(
masks[i],
{ mode: 'bilinear', size: target_image_size }
));
// Crop mask
interpolated_mask = interpolated_mask.slice(null, null, [0, reshaped_input_size[0]], [0, reshaped_input_size[1]]);
// Downscale mask
interpolated_mask = (await interpolate_4d(
interpolated_mask,
{ mode: 'bilinear', size: original_size }
));
if (binarize) {
const data = interpolated_mask.data;
const binarizedMaskData = new Uint8Array(data.length);
for (let i = 0; i < data.length; ++i) {
if (data[i] > mask_threshold) {
binarizedMaskData[i] = 1;
}
}
interpolated_mask = new Tensor(
'bool',
binarizedMaskData,
interpolated_mask.dims
)
}
output_masks.push(interpolated_mask);
}
return output_masks;
}
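    // Editor's usage sketch (the `pred_masks` output name follows the Python SAM API and is
    // an assumption here): given SAM model outputs and the sizes returned by `_call` above,
    //   const masks = await processor.post_process_masks(
    //       outputs.pred_masks, processed.original_sizes, processed.reshaped_input_sizes);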
/**
* Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.
* @param {import("../../utils/image.js").RawImage} image Input original image
* @param {number} target_size Target size of the resized image
* @param {Object} options Options for generating crop boxes
* @param {number} [options.crop_n_layers] If >0, mask prediction will be run again on crops of the image.
* Sets the number of layers to run, where each layer has 2**i_layer number of image crops.
* @param {number} [options.overlap_ratio] Sets the degree to which crops overlap. In the first crop layer,
* crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap.
* @param {number} [options.points_per_crop] Number of points to sample from each crop.
* @param {number} [options.crop_n_points_downscale_factor] The number of points-per-side sampled in layer n is
* scaled down by crop_n_points_downscale_factor**n.
* @returns {Object} An object containing the crop boxes, number of points per crop, cropped images, and input labels.
*/
generate_crop_boxes(image, target_size, {
crop_n_layers = 0,
overlap_ratio = 512 / 1500,
points_per_crop = 32,
crop_n_points_downscale_factor = 1,
} = {}) {
// TODO: Implement
// return { crop_boxes, points_per_crop, cropped_images, input_labels }
}
}
| transformers.js/src/models/sam/image_processing_sam.js/0 | {
"file_path": "transformers.js/src/models/sam/image_processing_sam.js",
"repo_id": "transformers.js",
"token_count": 4498
} |
const WHISPER_LANGUAGES = [
["en", "english"],
["zh", "chinese"],
["de", "german"],
["es", "spanish"],
["ru", "russian"],
["ko", "korean"],
["fr", "french"],
["ja", "japanese"],
["pt", "portuguese"],
["tr", "turkish"],
["pl", "polish"],
["ca", "catalan"],
["nl", "dutch"],
["ar", "arabic"],
["sv", "swedish"],
["it", "italian"],
["id", "indonesian"],
["hi", "hindi"],
["fi", "finnish"],
["vi", "vietnamese"],
["he", "hebrew"],
["uk", "ukrainian"],
["el", "greek"],
["ms", "malay"],
["cs", "czech"],
["ro", "romanian"],
["da", "danish"],
["hu", "hungarian"],
["ta", "tamil"],
["no", "norwegian"],
["th", "thai"],
["ur", "urdu"],
["hr", "croatian"],
["bg", "bulgarian"],
["lt", "lithuanian"],
["la", "latin"],
["mi", "maori"],
["ml", "malayalam"],
["cy", "welsh"],
["sk", "slovak"],
["te", "telugu"],
["fa", "persian"],
["lv", "latvian"],
["bn", "bengali"],
["sr", "serbian"],
["az", "azerbaijani"],
["sl", "slovenian"],
["kn", "kannada"],
["et", "estonian"],
["mk", "macedonian"],
["br", "breton"],
["eu", "basque"],
["is", "icelandic"],
["hy", "armenian"],
["ne", "nepali"],
["mn", "mongolian"],
["bs", "bosnian"],
["kk", "kazakh"],
["sq", "albanian"],
["sw", "swahili"],
["gl", "galician"],
["mr", "marathi"],
["pa", "punjabi"],
["si", "sinhala"],
["km", "khmer"],
["sn", "shona"],
["yo", "yoruba"],
["so", "somali"],
["af", "afrikaans"],
["oc", "occitan"],
["ka", "georgian"],
["be", "belarusian"],
["tg", "tajik"],
["sd", "sindhi"],
["gu", "gujarati"],
["am", "amharic"],
["yi", "yiddish"],
["lo", "lao"],
["uz", "uzbek"],
["fo", "faroese"],
["ht", "haitian creole"],
["ps", "pashto"],
["tk", "turkmen"],
["nn", "nynorsk"],
["mt", "maltese"],
["sa", "sanskrit"],
["lb", "luxembourgish"],
["my", "myanmar"],
["bo", "tibetan"],
["tl", "tagalog"],
["mg", "malagasy"],
["as", "assamese"],
["tt", "tatar"],
["haw", "hawaiian"],
["ln", "lingala"],
["ha", "hausa"],
["ba", "bashkir"],
["jw", "javanese"],
["su", "sundanese"],
]
// @ts-ignore
export const WHISPER_LANGUAGE_MAPPING = new Map(WHISPER_LANGUAGES);
// @ts-ignore
export const WHISPER_TO_LANGUAGE_CODE_MAPPING = new Map([
...WHISPER_LANGUAGES.map(([k, v]) => [v, k]),
...[
["burmese", "my"],
["valencian", "ca"],
["flemish", "nl"],
["haitian", "ht"],
["letzeburgesch", "lb"],
["pushto", "ps"],
["panjabi", "pa"],
["moldavian", "ro"],
["moldovan", "ro"],
["sinhalese", "si"],
["castilian", "es"],
]
]);
/**
* @param {string} language The language name or code
* @returns {string} The language code
*/
export function whisper_language_to_code(language) {
language = language.toLowerCase();
// Map to code from user-friendly name (e.g., "english" -> "en")
let language_code = WHISPER_TO_LANGUAGE_CODE_MAPPING.get(language);
if (language_code === undefined) {
// User provided something that is not a language name
if (WHISPER_LANGUAGE_MAPPING.has(language)) {
// User provided the language code directly (e.g., "en")
language_code = language;
} else {
// User provided something that is not a language code or name
const is_language_code = language.length === 2;
const langs = is_language_code ? WHISPER_LANGUAGE_MAPPING.keys() : WHISPER_LANGUAGE_MAPPING.values();
            throw new Error(`Language "${language}" is not supported. Must be one of: ${JSON.stringify(Array.from(langs))}`);
}
}
return language_code;
}
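// Editor's usage sketch: the helper accepts a language name, an alias, or a two-letter
// code, and always returns the code. The assertions below follow directly from the
// mappings defined above.
function exampleWhisperLanguageToCode() {
    console.assert(whisper_language_to_code('English') === 'en');
    console.assert(whisper_language_to_code('burmese') === 'my'); // alias of "myanmar"
    console.assert(whisper_language_to_code('fr') === 'fr');      // codes pass through unchanged
}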
| transformers.js/src/models/whisper/common_whisper.js/0 | {
"file_path": "transformers.js/src/models/whisper/common_whisper.js",
"repo_id": "transformers.js",
"token_count": 1848
} |
/**
* @file Utility functions to interact with the Hugging Face Hub (https://huggingface.co/models)
*
* @module utils/hub
*/
import fs from 'fs';
import path from 'path';
import { env } from '../env.js';
import { dispatchCallback } from './core.js';
/**
* @typedef {Object} PretrainedOptions Options for loading a pretrained model.
* @property {import('./core.js').ProgressCallback} [progress_callback=null] If specified, this function will be called during model construction, to provide the user with progress updates.
* @property {import('../configs.js').PretrainedConfig} [config=null] Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
* - The model is a model provided by the library (loaded with the *model id* string of a pretrained model).
* - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory.
* @property {string} [cache_dir=null] Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.
* @property {boolean} [local_files_only=false] Whether or not to only look at local files (e.g., not try downloading the model).
* @property {string} [revision='main'] The specific model version to use. It can be a branch name, a tag name, or a commit id,
 * since we use a git-based system for storing models and other artifacts on huggingface.co, `revision` can be any identifier allowed by git.
* NOTE: This setting is ignored for local requests.
*/
/**
* @typedef {Object} ModelSpecificPretrainedOptions Options for loading a pretrained model.
* @property {string} [subfolder='onnx'] In case the relevant files are located inside a subfolder of the model repo on huggingface.co,
* you can specify the folder name here.
* @property {string} [model_file_name=null] If specified, load the model with this name (excluding the .onnx suffix). Currently only valid for encoder- or decoder-only models.
* @property {import("./devices.js").DeviceType|Record<string, import("./devices.js").DeviceType>} [device=null] The device to run the model on. If not specified, the device will be chosen from the environment settings.
* @property {import("./dtypes.js").DataType|Record<string, import("./dtypes.js").DataType>} [dtype=null] The data type to use for the model. If not specified, the data type will be chosen from the environment settings.
* @property {boolean|Record<string, boolean>} [use_external_data_format=false] Whether to load the model using the external data format (used for models >= 2GB in size).
* @property {import('onnxruntime-common').InferenceSession.SessionOptions} [session_options] (Optional) User-specified session options passed to the runtime. If not provided, suitable defaults will be chosen.
*/
/**
* @typedef {PretrainedOptions & ModelSpecificPretrainedOptions} PretrainedModelOptions Options for loading a pretrained model.
*/
/**
* Mapping from file extensions to MIME types.
*/
const CONTENT_TYPE_MAP = {
'txt': 'text/plain',
'html': 'text/html',
'css': 'text/css',
'js': 'text/javascript',
'json': 'application/json',
'png': 'image/png',
'jpg': 'image/jpeg',
'jpeg': 'image/jpeg',
'gif': 'image/gif',
}
class FileResponse {
/**
* Creates a new `FileResponse` object.
* @param {string|URL} filePath
*/
constructor(filePath) {
this.filePath = filePath;
this.headers = new Headers();
this.exists = fs.existsSync(filePath);
if (this.exists) {
this.status = 200;
this.statusText = 'OK';
let stats = fs.statSync(filePath);
this.headers.set('content-length', stats.size.toString());
this.updateContentType();
let self = this;
this.body = new ReadableStream({
start(controller) {
self.arrayBuffer().then(buffer => {
controller.enqueue(new Uint8Array(buffer));
controller.close();
})
}
});
} else {
this.status = 404;
this.statusText = 'Not Found';
this.body = null;
}
}
/**
* Updates the 'content-type' header property of the response based on the extension of
* the file specified by the filePath property of the current object.
* @returns {void}
*/
updateContentType() {
// Set content-type header based on file extension
const extension = this.filePath.toString().split('.').pop().toLowerCase();
this.headers.set('content-type', CONTENT_TYPE_MAP[extension] ?? 'application/octet-stream');
}
/**
* Clone the current FileResponse object.
* @returns {FileResponse} A new FileResponse object with the same properties as the current object.
*/
clone() {
let response = new FileResponse(this.filePath);
response.exists = this.exists;
response.status = this.status;
response.statusText = this.statusText;
response.headers = new Headers(this.headers);
return response;
}
/**
* Reads the contents of the file specified by the filePath property and returns a Promise that
* resolves with an ArrayBuffer containing the file's contents.
* @returns {Promise<ArrayBuffer>} A Promise that resolves with an ArrayBuffer containing the file's contents.
* @throws {Error} If the file cannot be read.
*/
async arrayBuffer() {
const data = await fs.promises.readFile(this.filePath);
return /** @type {ArrayBuffer} */ (data.buffer);
}
/**
* Reads the contents of the file specified by the filePath property and returns a Promise that
* resolves with a Blob containing the file's contents.
* @returns {Promise<Blob>} A Promise that resolves with a Blob containing the file's contents.
* @throws {Error} If the file cannot be read.
*/
async blob() {
const data = await fs.promises.readFile(this.filePath);
return new Blob([data], { type: this.headers.get('content-type') });
}
/**
* Reads the contents of the file specified by the filePath property and returns a Promise that
* resolves with a string containing the file's contents.
* @returns {Promise<string>} A Promise that resolves with a string containing the file's contents.
* @throws {Error} If the file cannot be read.
*/
async text() {
const data = await fs.promises.readFile(this.filePath, 'utf8');
return data;
}
/**
* Reads the contents of the file specified by the filePath property and returns a Promise that
* resolves with a parsed JavaScript object containing the file's contents.
*
* @returns {Promise<Object>} A Promise that resolves with a parsed JavaScript object containing the file's contents.
* @throws {Error} If the file cannot be read.
*/
async json() {
return JSON.parse(await this.text());
}
}
/**
* Determines whether the given string is a valid URL.
* @param {string|URL} string The string to test for validity as an URL.
* @param {string[]} [protocols=null] A list of valid protocols. If specified, the protocol must be in this list.
* @param {string[]} [validHosts=null] A list of valid hostnames. If specified, the URL's hostname must be in this list.
* @returns {boolean} True if the string is a valid URL, false otherwise.
*/
function isValidUrl(string, protocols = null, validHosts = null) {
let url;
try {
url = new URL(string);
} catch (_) {
return false;
}
if (protocols && !protocols.includes(url.protocol)) {
return false;
}
if (validHosts && !validHosts.includes(url.hostname)) {
return false;
}
return true;
}
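// Editor's note, illustrating the checks above (values are examples only):
//   isValidUrl('https://huggingface.co/bert-base-uncased', ['http:', 'https:']) -> true
//   isValidUrl('ftp://example.com', ['http:', 'https:'])                        -> false (protocol not allowed)
//   isValidUrl('./models/bert-base-uncased', ['http:', 'https:'])               -> false (not parseable as a URL)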
/**
* Helper function to get a file, using either the Fetch API or FileSystem API.
*
* @param {URL|string} urlOrPath The URL/path of the file to get.
* @returns {Promise<FileResponse|Response>} A promise that resolves to a FileResponse object (if the file is retrieved using the FileSystem API), or a Response object (if the file is retrieved using the Fetch API).
*/
export async function getFile(urlOrPath) {
if (env.useFS && !isValidUrl(urlOrPath, ['http:', 'https:', 'blob:'])) {
return new FileResponse(urlOrPath);
} else if (typeof process !== 'undefined' && process?.release?.name === 'node') {
const IS_CI = !!process.env?.TESTING_REMOTELY;
const version = env.version;
const headers = new Headers();
headers.set('User-Agent', `transformers.js/${version}; is_ci/${IS_CI};`);
// Check whether we are making a request to the Hugging Face Hub.
const isHFURL = isValidUrl(urlOrPath, ['http:', 'https:'], ['huggingface.co', 'hf.co']);
if (isHFURL) {
// If an access token is present in the environment variables,
// we add it to the request headers.
// NOTE: We keep `HF_ACCESS_TOKEN` for backwards compatibility (as a fallback).
const token = process.env?.HF_TOKEN ?? process.env?.HF_ACCESS_TOKEN;
if (token) {
headers.set('Authorization', `Bearer ${token}`);
}
}
return fetch(urlOrPath, { headers });
} else {
// Running in a browser-environment, so we use default headers
// NOTE: We do not allow passing authorization headers in the browser,
// since this would require exposing the token to the client.
return fetch(urlOrPath);
}
}
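// Editor's usage sketch (the URL is illustrative): `getFile` returns either a `FileResponse`
// or a fetch `Response`, both of which expose `json()`, `text()`, `blob()` and `arrayBuffer()`.
//   const response = await getFile('https://huggingface.co/Xenova/bert-base-uncased/resolve/main/config.json');
//   if (response.status === 200) { const config = await response.json(); }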
const ERROR_MAPPING = {
// 4xx errors (https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#client_error_responses)
400: 'Bad request error occurred while trying to load file',
401: 'Unauthorized access to file',
403: 'Forbidden access to file',
404: 'Could not locate file',
408: 'Request timeout error occurred while trying to load file',
// 5xx errors (https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#server_error_responses)
    500: 'Internal server error occurred while trying to load file',
502: 'Bad gateway error occurred while trying to load file',
503: 'Service unavailable error occurred while trying to load file',
504: 'Gateway timeout error occurred while trying to load file',
}
/**
* Helper method to handle fatal errors that occur while trying to load a file from the Hugging Face Hub.
* @param {number} status The HTTP status code of the error.
* @param {string} remoteURL The URL of the file that could not be loaded.
* @param {boolean} fatal Whether to raise an error if the file could not be loaded.
 * @returns {null} Returns `null` if `fatal = false`.
 * @throws {Error} If `fatal = true`.
*/
function handleError(status, remoteURL, fatal) {
if (!fatal) {
// File was not loaded correctly, but it is optional.
// TODO in future, cache the response?
return null;
}
const message = ERROR_MAPPING[status] ?? `Error (${status}) occurred while trying to load file`;
throw Error(`${message}: "${remoteURL}".`);
}
class FileCache {
/**
* Instantiate a `FileCache` object.
* @param {string} path
*/
constructor(path) {
this.path = path;
}
/**
* Checks whether the given request is in the cache.
* @param {string} request
* @returns {Promise<FileResponse | undefined>}
*/
async match(request) {
let filePath = path.join(this.path, request);
let file = new FileResponse(filePath);
if (file.exists) {
return file;
} else {
return undefined;
}
}
/**
* Adds the given response to the cache.
* @param {string} request
* @param {Response|FileResponse} response
* @returns {Promise<void>}
*/
async put(request, response) {
const buffer = Buffer.from(await response.arrayBuffer());
let outputPath = path.join(this.path, request);
try {
await fs.promises.mkdir(path.dirname(outputPath), { recursive: true });
await fs.promises.writeFile(outputPath, buffer);
} catch (err) {
console.warn('An error occurred while writing the file to cache:', err)
}
}
// TODO add the rest?
// addAll(requests: RequestInfo[]): Promise<void>;
// delete(request: RequestInfo | URL, options?: CacheQueryOptions): Promise<boolean>;
// keys(request?: RequestInfo | URL, options?: CacheQueryOptions): Promise<ReadonlyArray<Request>>;
// match(request: RequestInfo | URL, options?: CacheQueryOptions): Promise<Response | undefined>;
// matchAll(request?: RequestInfo | URL, options?: CacheQueryOptions): Promise<ReadonlyArray<Response>>;
}
/**
*
* @param {FileCache|Cache} cache The cache to search
* @param {string[]} names The names of the item to search for
* @returns {Promise<FileResponse|Response|undefined>} The item from the cache, or undefined if not found.
*/
async function tryCache(cache, ...names) {
for (let name of names) {
try {
let result = await cache.match(name);
if (result) return result;
} catch (e) {
continue;
}
}
return undefined;
}
/**
*
* Retrieves a file from either a remote URL using the Fetch API or from the local file system using the FileSystem API.
* If the filesystem is available and `env.useCache = true`, the file will be downloaded and cached.
*
* @param {string} path_or_repo_id This can be either:
* - a string, the *model id* of a model repo on huggingface.co.
* - a path to a *directory* potentially containing the file.
 * @param {string} filename The name of the file to locate in `path_or_repo_id`.
* @param {boolean} [fatal=true] Whether to throw an error if the file is not found.
* @param {PretrainedOptions} [options] An object containing optional parameters.
*
* @throws Will throw an error if the file is not found and `fatal` is true.
 * @returns {Promise<Uint8Array|null>} A Promise that resolves with the file content as a buffer, or `null` if the file was not found and `fatal` is false.
*/
export async function getModelFile(path_or_repo_id, filename, fatal = true, options = {}) {
if (!env.allowLocalModels) {
// User has disabled local models, so we just make sure other settings are correct.
if (options.local_files_only) {
throw Error("Invalid configuration detected: local models are disabled (`env.allowLocalModels=false`) but you have requested to only use local models (`local_files_only=true`).")
} else if (!env.allowRemoteModels) {
throw Error("Invalid configuration detected: both local and remote models are disabled. Fix by setting `env.allowLocalModels` or `env.allowRemoteModels` to `true`.")
}
}
// Initiate file retrieval
dispatchCallback(options.progress_callback, {
status: 'initiate',
name: path_or_repo_id,
file: filename
})
    // First, check if a caching backend is available
    // If no caching mechanism is available, the file will be downloaded every time
let cache;
if (!cache && env.useBrowserCache) {
if (typeof caches === 'undefined') {
throw Error('Browser cache is not available in this environment.')
}
try {
// In some cases, the browser cache may be visible, but not accessible due to security restrictions.
// For example, when running an application in an iframe, if a user attempts to load the page in
// incognito mode, the following error is thrown: `DOMException: Failed to execute 'open' on 'CacheStorage':
// An attempt was made to break through the security policy of the user agent.`
// So, instead of crashing, we just ignore the error and continue without using the cache.
cache = await caches.open('transformers-cache');
} catch (e) {
console.warn('An error occurred while opening the browser cache:', e);
}
}
if (!cache && env.useFSCache) {
// TODO throw error if not available
// If `cache_dir` is not specified, use the default cache directory
cache = new FileCache(options.cache_dir ?? env.cacheDir);
}
if (!cache && env.useCustomCache) {
// Allow the user to specify a custom cache system.
if (!env.customCache) {
throw Error('`env.useCustomCache=true`, but `env.customCache` is not defined.')
}
// Check that the required methods are defined:
if (!env.customCache.match || !env.customCache.put) {
throw new Error(
"`env.customCache` must be an object which implements the `match` and `put` functions of the Web Cache API. " +
"For more information, see https://developer.mozilla.org/en-US/docs/Web/API/Cache"
)
}
cache = env.customCache;
}
const revision = options.revision ?? 'main';
let requestURL = pathJoin(path_or_repo_id, filename);
let localPath = pathJoin(env.localModelPath, requestURL);
let remoteURL = pathJoin(
env.remoteHost,
env.remotePathTemplate
.replaceAll('{model}', path_or_repo_id)
.replaceAll('{revision}', encodeURIComponent(revision)),
filename
);
// Choose cache key for filesystem cache
// When using the main revision (default), we use the request URL as the cache key.
// If a specific revision is requested, we account for this in the cache key.
let fsCacheKey = revision === 'main' ? requestURL : pathJoin(path_or_repo_id, revision, filename);
/** @type {string} */
let cacheKey;
let proposedCacheKey = cache instanceof FileCache ? fsCacheKey : remoteURL;
// Whether to cache the final response in the end.
let toCacheResponse = false;
/** @type {Response|FileResponse|undefined} */
let response;
if (cache) {
// A caching system is available, so we try to get the file from it.
// 1. We first try to get from cache using the local path. In some environments (like deno),
// non-URL cache keys are not allowed. In these cases, `response` will be undefined.
// 2. If no response is found, we try to get from cache using the remote URL or file system cache.
response = await tryCache(cache, localPath, proposedCacheKey);
}
const cacheHit = response !== undefined;
if (response === undefined) {
// Caching not available, or file is not cached, so we perform the request
if (env.allowLocalModels) {
// Accessing local models is enabled, so we try to get the file locally.
// If request is a valid HTTP URL, we skip the local file check. Otherwise, we try to get the file locally.
const isURL = isValidUrl(requestURL, ['http:', 'https:']);
if (!isURL) {
try {
response = await getFile(localPath);
cacheKey = localPath; // Update the cache key to be the local path
} catch (e) {
// Something went wrong while trying to get the file locally.
// NOTE: error handling is done in the next step (since `response` will be undefined)
console.warn(`Unable to load from local path "${localPath}": "${e}"`);
}
} else if (options.local_files_only) {
throw new Error(`\`local_files_only=true\`, but attempted to load a remote file from: ${requestURL}.`);
} else if (!env.allowRemoteModels) {
throw new Error(`\`env.allowRemoteModels=false\`, but attempted to load a remote file from: ${requestURL}.`);
}
}
if (response === undefined || response.status === 404) {
// File not found locally. This means either:
// - The user has disabled local file access (`env.allowLocalModels=false`)
// - the path is a valid HTTP url (`response === undefined`)
// - the path is not a valid HTTP url and the file is not present on the file system or local server (`response.status === 404`)
if (options.local_files_only || !env.allowRemoteModels) {
// User requested local files only, but the file is not found locally.
if (fatal) {
throw Error(`\`local_files_only=true\` or \`env.allowRemoteModels=false\` and file was not found locally at "${localPath}".`);
} else {
// File not found, but this file is optional.
// TODO in future, cache the response?
return null;
}
}
// File not found locally, so we try to download it from the remote server
response = await getFile(remoteURL);
if (response.status !== 200) {
return handleError(response.status, remoteURL, fatal);
}
// Success! We use the proposed cache key from earlier
cacheKey = proposedCacheKey;
}
// Only cache the response if:
toCacheResponse =
cache // 1. A caching system is available
&& typeof Response !== 'undefined' // 2. `Response` is defined (i.e., we are in a browser-like environment)
&& response instanceof Response // 3. result is a `Response` object (i.e., not a `FileResponse`)
&& response.status === 200 // 4. request was successful (status code 200)
}
// Start downloading
dispatchCallback(options.progress_callback, {
status: 'download',
name: path_or_repo_id,
file: filename
})
/** @type {Uint8Array} */
let buffer;
if (!options.progress_callback) {
// If no progress callback is specified, we can use the `.arrayBuffer()`
// method to read the response.
buffer = new Uint8Array(await response.arrayBuffer());
} else if (
cacheHit // The item is being read from the cache
&&
typeof navigator !== 'undefined' && /firefox/i.test(navigator.userAgent) // We are in Firefox
) {
        // Due to a bug in Firefox, we cannot display progress when loading from cache.
// Fortunately, since this should be instantaneous, this should not impact users too much.
buffer = new Uint8Array(await response.arrayBuffer());
// For completeness, we still fire the final progress callback
dispatchCallback(options.progress_callback, {
status: 'progress',
name: path_or_repo_id,
file: filename,
progress: 100,
loaded: buffer.length,
total: buffer.length,
})
} else {
buffer = await readResponse(response, data => {
dispatchCallback(options.progress_callback, {
status: 'progress',
name: path_or_repo_id,
file: filename,
...data,
})
})
}
if (
// Only cache web responses
// i.e., do not cache FileResponses (prevents duplication)
toCacheResponse && cacheKey
&&
// Check again whether request is in cache. If not, we add the response to the cache
(await cache.match(cacheKey) === undefined)
) {
// NOTE: We use `new Response(buffer, ...)` instead of `response.clone()` to handle LFS files
await cache.put(cacheKey, new Response(buffer, {
headers: response.headers
}))
.catch(err => {
// Do not crash if unable to add to cache (e.g., QuotaExceededError).
// Rather, log a warning and proceed with execution.
console.warn(`Unable to add response to browser cache: ${err}.`);
});
}
dispatchCallback(options.progress_callback, {
status: 'done',
name: path_or_repo_id,
file: filename
});
return buffer;
}
/**
* Fetches a JSON file from a given path and file name.
*
* @param {string} modelPath The path to the directory containing the file.
* @param {string} fileName The name of the file to fetch.
* @param {boolean} [fatal=true] Whether to throw an error if the file is not found.
* @param {PretrainedOptions} [options] An object containing optional parameters.
* @returns {Promise<Object>} The JSON data parsed into a JavaScript object.
* @throws Will throw an error if the file is not found and `fatal` is true.
*/
export async function getModelJSON(modelPath, fileName, fatal = true, options = {}) {
let buffer = await getModelFile(modelPath, fileName, fatal, options);
if (buffer === null) {
// Return empty object
return {}
}
let decoder = new TextDecoder('utf-8');
let jsonData = decoder.decode(buffer);
return JSON.parse(jsonData);
}
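// Editor's usage sketch (model id is illustrative):
//   const config = await getModelJSON('Xenova/bert-base-uncased', 'config.json');
//   // Optional files can be requested with `fatal = false`, in which case `{}` is returned if missing:
//   const generationConfig = await getModelJSON('Xenova/bert-base-uncased', 'generation_config.json', false);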
/**
* Read and track progress when reading a Response object
*
* @param {Response|FileResponse} response The Response object to read
* @param {(data: {progress: number, loaded: number, total: number}) => void} progress_callback The function to call with progress updates
* @returns {Promise<Uint8Array>} A Promise that resolves with the Uint8Array buffer
*/
async function readResponse(response, progress_callback) {
const contentLength = response.headers.get('Content-Length');
if (contentLength === null) {
console.warn('Unable to determine content-length from response headers. Will expand buffer when needed.')
}
let total = parseInt(contentLength ?? '0');
let buffer = new Uint8Array(total);
let loaded = 0;
const reader = response.body.getReader();
async function read() {
const { done, value } = await reader.read();
if (done) return;
let newLoaded = loaded + value.length;
if (newLoaded > total) {
total = newLoaded;
// Adding the new data will overflow buffer.
// In this case, we extend the buffer
let newBuffer = new Uint8Array(total);
// copy contents
newBuffer.set(buffer);
buffer = newBuffer;
}
buffer.set(value, loaded)
loaded = newLoaded;
const progress = (loaded / total) * 100;
// Call your function here
progress_callback({
progress: progress,
loaded: loaded,
total: total,
})
return read();
}
// Actually read
await read();
return buffer;
}
/**
* Joins multiple parts of a path into a single path, while handling leading and trailing slashes.
*
* @param {...string} parts Multiple parts of a path.
* @returns {string} A string representing the joined path.
*/
function pathJoin(...parts) {
// https://stackoverflow.com/a/55142565
parts = parts.map((part, index) => {
if (index) {
part = part.replace(new RegExp('^/'), '');
}
if (index !== parts.length - 1) {
part = part.replace(new RegExp('/$'), '');
}
return part;
})
return parts.join('/');
}
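// Editor's note, illustrating the joining rules above:
//   pathJoin('https://huggingface.co/', '/Xenova/bert-base-uncased', 'config.json')
//     -> 'https://huggingface.co/Xenova/bert-base-uncased/config.json'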
| transformers.js/src/utils/hub.js/0 | {
"file_path": "transformers.js/src/utils/hub.js",
"repo_id": "transformers.js",
"token_count": 10053
} |
import { AutoImageProcessor, DPTFeatureExtractor, DPTImageProcessor } from "../../../src/transformers.js";
import { load_cached_image } from "../../asset_cache.js";
import { MAX_PROCESSOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js";
export default () => {
// DPTFeatureExtractor
describe("DPTFeatureExtractor", () => {
const model_id = "Xenova/dpt-hybrid-midas";
/** @type {DPTFeatureExtractor} */
let processor;
beforeAll(async () => {
processor = await AutoImageProcessor.from_pretrained(model_id);
}, MAX_PROCESSOR_LOAD_TIME);
it(
"grayscale images",
async () => {
const image = await load_cached_image("cats");
const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image);
expect(pixel_values.dims).toEqual([1, 3, 384, 384]);
expect(pixel_values.mean().item()).toBeCloseTo(0.0372855559389454, 6);
expect(original_sizes).toEqual([[480, 640]]);
expect(reshaped_input_sizes).toEqual([[384, 384]]);
},
MAX_TEST_EXECUTION_TIME,
);
});
// DPTImageProcessor
// - tests ensure_multiple_of
// - tests keep_aspect_ratio
// - tests bankers rounding
describe("DPTImageProcessor", () => {
const model_id = "Xenova/depth-anything-small-hf";
/** @type {DPTImageProcessor} */
let processor;
beforeAll(async () => {
processor = await AutoImageProcessor.from_pretrained(model_id);
}, MAX_PROCESSOR_LOAD_TIME);
it(
"ensure_multiple_of w/ normal rounding",
async () => {
const image = await load_cached_image("cats");
const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image);
expect(pixel_values.dims).toEqual([1, 3, 518, 686]);
expect(pixel_values.mean().item()).toBeCloseTo(0.30337387323379517, 3);
expect(original_sizes).toEqual([[480, 640]]);
expect(reshaped_input_sizes).toEqual([[518, 686]]);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"ensure_multiple_of w/ bankers rounding",
async () => {
const image = await load_cached_image("checkerboard_64x32");
const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image);
// NOTE: without bankers rounding, this would be [1, 3, 266, 518]
expect(pixel_values.dims).toEqual([1, 3, 252, 518]);
expect(pixel_values.mean().item()).toBeCloseTo(0.2267402559518814, 1);
expect(original_sizes).toEqual([[32, 64]]);
expect(reshaped_input_sizes).toEqual([[252, 518]]);
},
MAX_TEST_EXECUTION_TIME,
);
});
};
| transformers.js/tests/models/dpt/test_image_processing_dpt.js/0 | {
"file_path": "transformers.js/tests/models/dpt/test_image_processing_dpt.js",
"repo_id": "transformers.js",
"token_count": 1111
} |
import { MgpstrProcessor, MgpstrForSceneTextRecognition } from "../../../src/transformers.js";
import { load_cached_image } from "../../asset_cache.js";
import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js";
export default () => {
describe("MgpstrForSceneTextRecognition", () => {
const model_id = "onnx-community/tiny-random-MgpstrForSceneTextRecognition";
/** @type {MgpstrForSceneTextRecognition} */
let model;
/** @type {MgpstrProcessor} */
let processor;
beforeAll(async () => {
model = await MgpstrForSceneTextRecognition.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
processor = await MgpstrProcessor.from_pretrained(model_id);
}, MAX_MODEL_LOAD_TIME);
const TARGETS = {
white_image: {
generated_text: ["mmmmmmmmmmmmmmmmmmmmmmmmmm"],
scores: [3.5553885547065065e-27],
char_preds: ["mmmmmmmmmmmmmmmmmmmmmmmmmm"],
bpe_preds: ["wwwwwwwwwwwwwwwwwwwwwwwwww"],
wp_preds: ["[unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65]"],
},
blue_image: {
generated_text: ["11111111111111111111111111"],
scores: [9.739909092663214e-32],
char_preds: ["11111111111111111111111111"],
bpe_preds: ["22222222222222222222222222"],
wp_preds: ["[unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59]"],
},
};
it(
"batch_size=1",
async () => {
const image_id = "white_image";
const image = await load_cached_image(image_id);
const inputs = await processor(image);
const outputs = await model(inputs);
const { max_token_length, num_character_labels, num_bpe_labels, num_wordpiece_labels } = model.config;
expect(outputs.char_logits.dims).toEqual([1, /* 27 */ max_token_length, /* 38 */ num_character_labels]);
expect(outputs.bpe_logits.dims).toEqual([1, /* 27 */ max_token_length, /* 99 */ num_bpe_labels]);
expect(outputs.wp_logits.dims).toEqual([1, /* 27 */ max_token_length, /* 99 */ num_wordpiece_labels]);
const decoded = processor.batch_decode(outputs.logits);
expect(decoded).toBeCloseToNested(TARGETS[image_id]);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"batch_size>1",
async () => {
const image_ids = ["white_image", "blue_image"];
const images = await Promise.all(image_ids.map((image_id) => load_cached_image(image_id)));
const inputs = await processor(images);
const outputs = await model(inputs);
const { max_token_length, num_character_labels, num_bpe_labels, num_wordpiece_labels } = model.config;
expect(outputs.char_logits.dims).toEqual([images.length, /* 27 */ max_token_length, /* 38 */ num_character_labels]);
expect(outputs.bpe_logits.dims).toEqual([images.length, /* 27 */ max_token_length, /* 99 */ num_bpe_labels]);
expect(outputs.wp_logits.dims).toEqual([images.length, /* 27 */ max_token_length, /* 99 */ num_wordpiece_labels]);
const decoded = processor.batch_decode(outputs.logits);
const target = image_ids.reduce((acc, image_id) => {
for (const key in TARGETS[image_id]) (acc[key] ??= []).push(...TARGETS[image_id][key]);
return acc;
}, {});
expect(decoded).toBeCloseToNested(target);
},
MAX_TEST_EXECUTION_TIME,
);
afterAll(async () => {
await model?.dispose();
}, MAX_MODEL_DISPOSE_TIME);
});
};
| transformers.js/tests/models/mgp_str/test_modeling_mgp_str.js/0 | {
"file_path": "transformers.js/tests/models/mgp_str/test_modeling_mgp_str.js",
"repo_id": "transformers.js",
"token_count": 1680
} |
import { AutoProcessor, PaliGemmaProcessor } from "../../../src/transformers.js";
import { load_cached_image } from "../../asset_cache.js";
import { MAX_PROCESSOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js";
export default () => {
const model_id = "hf-internal-testing/tiny-random-PaliGemmaForConditionalGeneration";
describe("PaliGemmaProcessor", () => {
/** @type {PaliGemmaProcessor} */
let processor;
let images = {};
beforeAll(async () => {
processor = await AutoProcessor.from_pretrained(model_id);
images = {
white_image: await load_cached_image("white_image"),
};
}, MAX_PROCESSOR_LOAD_TIME);
it(
"Image-only (default text)",
async () => {
const { input_ids, pixel_values } = await processor(images.white_image);
expect(input_ids.dims).toEqual([1, 258]);
expect(pixel_values.dims).toEqual([1, 3, 224, 224]);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"Single image & text",
async () => {
const { input_ids, pixel_values } = await processor(images.white_image, "<image>What is on the flower?");
expect(input_ids.dims).toEqual([1, 264]);
expect(pixel_values.dims).toEqual([1, 3, 224, 224]);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"Multiple images & text",
async () => {
const { input_ids, pixel_values } = await processor([images.white_image, images.white_image], "<image><image>Describe the images.");
expect(input_ids.dims).toEqual([1, 518]);
expect(pixel_values.dims).toEqual([2, 3, 224, 224]);
},
MAX_TEST_EXECUTION_TIME,
);
});
};
| transformers.js/tests/models/paligemma/test_processor_paligemma.js/0 | {
"file_path": "transformers.js/tests/models/paligemma/test_processor_paligemma.js",
"repo_id": "transformers.js",
"token_count": 709
} |
import { pipeline, AutomaticSpeechRecognitionPipeline } from "../../src/transformers.js";
import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js";
const PIPELINE_ID = "automatic-speech-recognition";
export default () => {
describe("Automatic Speech Recognition", () => {
describe("whisper", () => {
const model_id = "Xenova/tiny-random-WhisperForConditionalGeneration";
const SAMPLING_RATE = 16000;
const audios = [new Float32Array(SAMPLING_RATE).fill(0), Float32Array.from({ length: SAMPLING_RATE }, (_, i) => i / 16000)];
const long_audios = [new Float32Array(SAMPLING_RATE * 60).fill(0), Float32Array.from({ length: SAMPLING_RATE * 60 }, (_, i) => (i % 1000) / 1000)];
const max_new_tokens = 5;
/** @type {AutomaticSpeechRecognitionPipeline} */
let pipe;
beforeAll(async () => {
pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
}, MAX_MODEL_LOAD_TIME);
it("should be an instance of AutomaticSpeechRecognitionPipeline", () => {
expect(pipe).toBeInstanceOf(AutomaticSpeechRecognitionPipeline);
});
describe("batch_size=1", () => {
it(
"default",
async () => {
const output = await pipe(audios[0], { max_new_tokens });
const target = { text: "àžàž°àžàž°àžàž°àžàž°URURUR" };
expect(output).toEqual(target);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"transcribe w/ return_timestamps=true",
async () => {
const output = await pipe(audios[0], { return_timestamps: true, max_new_tokens });
const target = {
text: " riceUR",
chunks: [
{ timestamp: [0.72, 17.72], text: " rice" },
{ timestamp: [17.72, null], text: "UR" },
],
};
expect(output).toBeCloseToNested(target, 5);
},
MAX_TEST_EXECUTION_TIME,
);
// TODO add: transcribe w/ return_timestamps="word"
// it(
// "transcribe w/ word-level timestamps",
// async () => {
// const output = await pipe(audios[0], { return_timestamps: "word", max_new_tokens });
// const target = [];
// expect(output).toBeCloseToNested(target, 5);
// },
// MAX_TEST_EXECUTION_TIME,
// );
it(
"transcribe w/ language",
async () => {
const output = await pipe(audios[0], { language: "french", task: "transcribe", max_new_tokens });
const target = { text: "àžàž°àžàž°àžàž°àžàž°URURUR" };
expect(output).toEqual(target);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"translate",
async () => {
const output = await pipe(audios[0], { language: "french", task: "translate", max_new_tokens });
const target = { text: "àžàž°àžàž°àžàž°àžàž°URURUR" };
expect(output).toEqual(target);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"audio > 30 seconds",
async () => {
const output = await pipe(long_audios[0], { chunk_length_s: 30, stride_length_s: 5, max_new_tokens });
const target = { text: "àžàž°àžàž°àžàž°àžàž°URURUR" };
expect(output).toEqual(target);
},
MAX_TEST_EXECUTION_TIME,
);
});
afterAll(async () => {
await pipe.dispose();
}, MAX_MODEL_DISPOSE_TIME);
});
describe("wav2vec2", () => {
const model_id = "Xenova/tiny-random-Wav2Vec2ForCTC-ONNX";
const SAMPLING_RATE = 16000;
const audios = [new Float32Array(SAMPLING_RATE).fill(0), Float32Array.from({ length: SAMPLING_RATE }, (_, i) => i / 16000)];
const long_audios = [new Float32Array(SAMPLING_RATE * 60).fill(0), Float32Array.from({ length: SAMPLING_RATE * 60 }, (_, i) => (i % 1000) / 1000)];
const max_new_tokens = 5;
/** @type {AutomaticSpeechRecognitionPipeline} */
let pipe;
beforeAll(async () => {
pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
}, MAX_MODEL_LOAD_TIME);
it("should be an instance of AutomaticSpeechRecognitionPipeline", () => {
expect(pipe).toBeInstanceOf(AutomaticSpeechRecognitionPipeline);
});
describe("batch_size=1", () => {
it(
"default",
async () => {
const output = await pipe(audios[0], { max_new_tokens });
const target = { text: "<unk>K" };
expect(output).toEqual(target);
},
MAX_TEST_EXECUTION_TIME,
);
});
afterAll(async () => {
await pipe.dispose();
}, MAX_MODEL_DISPOSE_TIME);
});
});
};
| transformers.js/tests/pipelines/test_pipelines_automatic_speech_recognition.js/0 | {
"file_path": "transformers.js/tests/pipelines/test_pipelines_automatic_speech_recognition.js",
"repo_id": "transformers.js",
"token_count": 2373
} |
import { pipeline, TextToAudioPipeline } from "../../src/transformers.js";
import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js";
const PIPELINE_ID = "text-to-audio";
export default () => {
describe("Text to Audio", () => {
const model_id = "Xenova/tiny-random-vits";
/** @type {TextToAudioPipeline} */
let pipe;
beforeAll(async () => {
pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
}, MAX_MODEL_LOAD_TIME);
it("should be an instance of TextToAudioPipeline", () => {
expect(pipe).toBeInstanceOf(TextToAudioPipeline);
});
it(
"default",
async () => {
const output = await pipe("hello");
expect(output.audio).toHaveLength(6400);
// NOTE: The mean value is not deterministic, so we just check the first few digits
expect(output.audio.reduce((a, b) => a + b, 0) / output.audio.length).toBeCloseTo(-0.0125, 2);
expect(output.sampling_rate).toEqual(16000);
},
MAX_TEST_EXECUTION_TIME,
);
afterAll(async () => {
await pipe.dispose();
}, MAX_MODEL_DISPOSE_TIME);
});
};
| transformers.js/tests/pipelines/test_pipelines_text_to_audio.js/0 | {
"file_path": "transformers.js/tests/pipelines/test_pipelines_text_to_audio.js",
"repo_id": "transformers.js",
"token_count": 492
} |
import { Tensor, cat, stack, layer_norm, ones_like, zeros_like, full_like, rand, std_mean } from "../../src/transformers.js";
import { init } from "../init.js";
import { compare } from "../test_utils.js";
init();
describe("Tensor operations", () => {
describe("cat", () => {
it("should concatenate on dim=0", () => {
const t1 = new Tensor("float32", [1, 2, 3], [1, 3]);
const t2 = new Tensor("float32", [4, 5, 6, 7, 8, 9], [2, 3]);
const t3 = new Tensor("float32", [10, 11, 12], [1, 3]);
const target1 = new Tensor("float32", [1, 2, 3, 4, 5, 6, 7, 8, 9], [3, 3]);
const target2 = new Tensor("float32", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [4, 3]);
// 2 tensors
const concatenated1 = cat([t1, t2], 0);
compare(concatenated1, target1, 1e-3);
// 3 tensors
const concatenated2 = cat([t1, t2, t3], 0);
compare(concatenated2, target2, 1e-3);
});
it("should concatenate on dim=1", () => {
const t1 = new Tensor("float32", [1, 2, 3, -1, -2, -3], [2, 3, 1]);
const t2 = new Tensor("float32", [4, -4], [2, 1, 1]);
const t3 = new Tensor("float32", [5, 6, -5, -6], [2, 2, 1]);
const target1 = new Tensor("float32", [1, 2, 3, 4, -1, -2, -3, -4], [2, 4, 1]);
const target2 = new Tensor("float32", [1, 2, 3, 4, 5, 6, -1, -2, -3, -4, -5, -6], [2, 6, 1]);
// 2 tensors
const concatenated1 = cat([t1, t2], 1);
compare(concatenated1, target1, 1e-3);
// 3 tensors
const concatenated2 = cat([t1, t2, t3], 1);
compare(concatenated2, target2, 1e-3);
});
it("should concatenate on dim=-2", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16], [2, 1, 3, 2]);
const t2 = new Tensor("float32", [7, 8, 9, 10, 17, 18, 19, 20], [2, 1, 2, 2]);
const target = new Tensor("float32", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], [2, 1, 5, 2]);
const concatenated = cat([t1, t2], -2);
compare(concatenated, target, 1e-3);
});
// TODO add tests for errors
});
describe("slice", () => {
it("should return a given row dim", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]);
const t2 = t1.slice(1);
const target = new Tensor("float32", [3, 4], [2]);
compare(t2, target);
});
it("should return a range of rows", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]);
const t2 = t1.slice([1, 3]);
const target = new Tensor("float32", [3, 4, 5, 6], [2, 2]);
compare(t2, target);
});
it("should return a crop", () => {
const t1 = new Tensor(
"float32",
Array.from({ length: 28 }, (_, i) => i + 1),
[4, 7],
);
const t2 = t1.slice([1, -1], [1, -1]);
const target = new Tensor("float32", [9, 10, 11, 12, 13, 16, 17, 18, 19, 20], [2, 5]);
compare(t2, target);
});
});
describe("stack", () => {
const t1 = new Tensor("float32", [0, 1, 2, 3, 4, 5], [1, 3, 2]);
it("should stack on dim=0", () => {
const target1 = new Tensor("float32", [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5], [2, 1, 3, 2]);
const target2 = new Tensor("float32", [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5], [3, 1, 3, 2]);
// 2 tensors
const stacked1 = stack([t1, t1], 0);
compare(stacked1, target1, 1e-3);
// 3 tensors
const stacked2 = stack([t1, t1, t1], 0);
compare(stacked2, target2, 1e-3);
});
it("should stack on dim=1", () => {
const target1 = new Tensor("float32", [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5], [1, 2, 3, 2]);
const target2 = new Tensor("float32", [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5], [1, 3, 3, 2]);
// 2 tensors
const stacked1 = stack([t1, t1], 1);
compare(stacked1, target1, 1e-3);
// 3 tensors
const stacked2 = stack([t1, t1, t1], 1);
compare(stacked2, target2, 1e-3);
});
it("should stack on dim=-1", () => {
const target1 = new Tensor("float32", [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5], [1, 3, 2, 2]);
const target2 = new Tensor("float32", [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5], [1, 3, 2, 3]);
// 2 tensors
const stacked1 = stack([t1, t1], -1);
compare(stacked1, target1, 1e-3);
// 3 tensors
const stacked2 = stack([t1, t1, t1], -1);
compare(stacked2, target2, 1e-3);
});
});
describe("permute", () => {
it("should permute", () => {
const x = new Tensor("float32", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], [2, 3, 4]);
// Permute axes to (0, 1, 2) - No change
const permuted_1 = x.permute(0, 1, 2);
const target_1 = x;
compare(permuted_1, target_1, 1e-3);
// Permute axes to (0, 2, 1)
const permuted_2 = x.permute(0, 2, 1);
const target_2 = new Tensor("float32", [0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11, 12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23], [2, 4, 3]);
compare(permuted_2, target_2, 1e-3);
// Permute axes to (1, 0, 2)
const permuted_3 = x.permute(1, 0, 2);
const target_3 = new Tensor("float32", [0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7, 16, 17, 18, 19, 8, 9, 10, 11, 20, 21, 22, 23], [3, 2, 4]);
compare(permuted_3, target_3, 1e-3);
// Permute axes to (1, 2, 0)
const permuted_4 = x.permute(1, 2, 0);
const target_4 = new Tensor("float32", [0, 12, 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23], [3, 4, 2]);
compare(permuted_4, target_4, 1e-3);
// Permute axes to (2, 0, 1)
const permuted_5 = x.permute(2, 0, 1);
const target_5 = new Tensor("float32", [0, 4, 8, 12, 16, 20, 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23], [4, 2, 3]);
compare(permuted_5, target_5, 1e-3);
// Permute axes to (2, 1, 0)
const permuted_6 = x.permute(2, 1, 0);
const target_6 = new Tensor("float32", [0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22, 3, 15, 7, 19, 11, 23], [4, 3, 2]);
compare(permuted_6, target_6, 1e-3);
});
});
describe("map", () => {
it("should double", () => {
const original = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]);
const target = new Tensor("float32", [2, 4, 6, 8, 10, 12], [2, 3]);
const doubled = original.map((x) => x * 2);
compare(doubled, target, 1e-3);
});
});
describe("mean", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3, 1]);
it("should calculate mean over the entire tensor", () => {
const target = new Tensor("float32", [3.5], []);
compare(t1.mean(), target, 1e-3);
});
it("should calculate mean over dimension 0", () => {
const target0 = new Tensor("float32", [2.5, 3.5, 4.5], [3, 1]);
compare(t1.mean(0), target0, 1e-3);
});
it("should calculate mean over dimension 1", () => {
const target1 = new Tensor("float32", [2, 5], [2, 1]);
compare(t1.mean(1), target1, 1e-3);
});
it("should calculate mean over dimension -1", () => {
const target2 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]);
compare(t1.mean(-1), target2, 1e-3);
});
});
describe("std_mean", () => {
it("should return std_mean for the entire tensor", () => {
const t = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]);
const [stdVal, meanVal] = std_mean(t);
compare(stdVal, new Tensor("float32", [1.8708287477493286], []), 1e-3);
compare(meanVal, new Tensor("float32", [3.5], []), 1e-3);
});
});
describe("min", () => {
it("should return the minimum over the entire tensor", () => {
const t1 = new Tensor("float32", [3, -2, 5, 0], [2, 2]);
const target = new Tensor("float32", [-2], []);
const result = t1.min();
compare(result, target, 1e-3);
});
it("should return the minimum over dimension 1", () => {
const t2 = new Tensor("float32", [4, 2, -1, 0, 6, 5], [3, 2]);
const target = new Tensor("float32", [2, -1, 5], [3]);
const result = t2.min(1);
compare(result, target, 1e-3);
});
});
describe("max", () => {
it("should return the maximum over the entire tensor", () => {
const t1 = new Tensor("float32", [3, 10, -2, 7], [2, 2]);
const target = new Tensor("float32", [10], []);
const result = t1.max();
compare(result, target, 1e-3);
});
it("should return the maximum over dimension 0", () => {
const t2 = new Tensor("float32", [1, 2, 4, 5, 9, 3], [3, 2]);
const target = new Tensor("float32", [9, 5], [2]);
const result = t2.max(0);
compare(result, target, 1e-3);
});
});
describe("sum", () => {
it("should calculate sum over entire tensor", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]);
const target = new Tensor("float32", [21], []);
const result = t1.sum();
compare(result, target, 1e-3);
});
it("should calculate sum over dimension 0", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]);
const target = new Tensor("float32", [5, 7, 9], [3]);
const result = t1.sum(0);
compare(result, target, 1e-3);
});
it("should calculate sum over dimension 1", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]);
const target = new Tensor("float32", [6, 15], [2]);
const result = t1.sum(1);
compare(result, target, 1e-3);
});
});
describe("norm", () => {
it("should calculate L2 norm over entire tensor", () => {
const t1 = new Tensor("float32", [3, 4], [2]);
const target = new Tensor("float32", [5], []);
const result = t1.norm();
compare(result, target, 1e-3);
});
it("should calculate L2 norm over dimension 0", () => {
const t1 = new Tensor("float32", [3, 4, 6, 8], [2, 2]);
const target = new Tensor("float32", [6.7082, 8.9443], [2]);
const result = t1.norm(2, 0);
compare(result, target, 1e-2);
});
});
describe("normalize", () => {
it("should normalize a vector correctly", () => {
const t1 = new Tensor("float32", [3, 4], [1, 2]);
const target = new Tensor("float32", [0.6, 0.8], [1, 2]);
const normalized = t1.normalize();
compare(normalized, target, 1e-3);
});
it("should normalize along dimension", () => {
const t1 = new Tensor("float32", [1, 2, 2, 3], [2, 2]);
const target = new Tensor("float32", [0.4472, 0.8944, 0.5547, 0.8321], [2, 2]);
const normalized = t1.normalize();
compare(normalized, target, 1e-3);
});
});
describe("layer_norm", () => {
it("should calculate layer norm", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]);
const target = new Tensor("float32", [-1.2247356176376343, 0.0, 1.2247356176376343, -1.2247357368469238, -1.1920928955078125e-7, 1.2247354984283447], [2, 3]);
const norm = layer_norm(t1, [t1.dims.at(-1)]);
compare(norm, target, 1e-3);
});
});
describe("sigmoid", () => {
it("should apply the sigmoid function to each element in the tensor", () => {
const t1 = new Tensor("float32", [0, 1, -1, 5, -5], [5]);
const target = new Tensor("float32", [0.5, 1 / (1 + Math.exp(-1)), 1 / (1 + Math.exp(1)), 1 / (1 + Math.exp(-5)), 1 / (1 + Math.exp(5))], [5]);
const result = t1.sigmoid();
compare(result, target, 1e-3);
});
});
describe("tolist", () => {
it("should return nested arrays for a 2D tensor", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]);
const arr = t1.tolist();
compare(arr, [
[1, 2],
[3, 4],
]);
});
});
describe("mul", () => {
it("should multiply constant", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]);
const target = new Tensor("float32", [2, 4, 6, 8], [2, 2]);
const result = t1.mul(2);
compare(result, target, 1e-3);
});
});
describe("div", () => {
it("should divide constant", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]);
const target = new Tensor("float32", [0.5, 1, 1.5, 2], [2, 2]);
const result = t1.div(2);
compare(result, target, 1e-3);
});
});
describe("add", () => {
it("should add constant", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]);
const target = new Tensor("float32", [3, 4, 5, 6], [2, 2]);
const result = t1.add(2);
compare(result, target, 1e-3);
});
});
describe("sub", () => {
it("should subtract constant", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]);
const target = new Tensor("float32", [-1, 0, 1, 2], [2, 2]);
const result = t1.sub(2);
compare(result, target, 1e-3);
});
});
describe("gt", () => {
it("should perform element-wise greater than comparison with a scalar", () => {
const t1 = new Tensor("float32", [1, 5, 3, 7], [4]);
const target = new Tensor("bool", [0, 1, 0, 1], [4]);
const result = t1.gt(4);
compare(result, target, 1e-3);
});
});
describe("lt", () => {
it("should perform element-wise less than comparison with a scalar", () => {
const t1 = new Tensor("float32", [1, 5, 3, 7], [4]);
const target = new Tensor("bool", [1, 0, 1, 0], [4]);
const result = t1.lt(4);
compare(result, target, 1e-3);
});
});
describe("squeeze", () => {
it("should remove all dimensions of size 1", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4], [1, 4]);
const target = new Tensor("float32", [1, 2, 3, 4], [4]);
const result = t1.squeeze();
compare(result, target, 1e-3);
});
it("should remove a specified dimension", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4], [1, 1, 2, 2]);
const result = t1.squeeze(1);
const target = new Tensor("float32", [1, 2, 3, 4], [1, 2, 2]);
compare(result, target, 1e-3);
});
it("should remove multiple dimensions", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4], [1, 1, 2, 1, 2]);
const result = t1.squeeze([0, 3]);
const target = new Tensor("float32", [1, 2, 3, 4], [1, 2, 2]);
compare(result, target, 1e-3);
});
});
describe("unsqueeze", () => {
it("should add a dimension at the specified axis", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4], [4]);
const target = new Tensor("float32", [1, 2, 3, 4], [1, 4]);
const result = t1.unsqueeze(0);
compare(result, target, 1e-3);
});
});
describe("flatten", () => {
it("should flatten a 2D tensor into 1D by default", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]);
const target = new Tensor("float32", [1, 2, 3, 4, 5, 6], [6]);
const result = t1.flatten();
compare(result, target, 1e-3);
});
});
describe("neg", () => {
it("should compute the negative of each element in the tensor", () => {
const t1 = new Tensor("float32", [1, -2, 0, 3], [4]);
const target = new Tensor("float32", [-1, 2, -0, -3], [4]);
const result = t1.neg();
compare(result, target, 1e-3);
});
});
describe("view", () => {
it("should reshape the tensor to the specified dimensions", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]);
const target = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]);
const result = t1.view(3, 2);
compare(result, target, 1e-3);
});
it("should reshape the tensor with an inferred dimension (-1)", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]);
const target = new Tensor("float32", [1, 2, 3, 4, 5, 6], [1, 6]);
const result = t1.view(1, -1);
compare(result, target, 1e-3);
});
it("should throw if multiple inferred dimensions are used", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]);
expect(() => t1.view(-1, -1)).toThrow();
});
});
describe("clamp", () => {
it("should clamp values between min and max", () => {
const t1 = new Tensor("float32", [-2, -1, 0, 1, 2, 3], [6]);
const target = new Tensor("float32", [-1, -1, 0, 1, 2, 2], [6]);
const result = t1.clamp(-1, 2);
compare(result, target, 1e-3);
});
});
describe("round", () => {
it("should round elements to the nearest integer", () => {
const t1 = new Tensor("float32", [0.1, 1.4, 2.5, 3.9, -1.2], [5]);
const target = new Tensor("float32", [0, 1, 3, 4, -1], [5]);
const result = t1.round();
compare(result, target, 1e-3);
});
});
describe("ones_like", () => {
it("should create a tensor of all ones with the same shape as the input", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]);
const result = ones_like(t1);
const target = new Tensor("int64", [1n, 1n, 1n, 1n], [2, 2]);
compare(result, target, 1e-3);
});
});
describe("zeros_like", () => {
it("should create a tensor of all zeros with the same shape as the input", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]);
const result = zeros_like(t1);
const target = new Tensor("int64", [0n, 0n, 0n, 0n], [2, 2]);
compare(result, target, 1e-3);
});
});
describe("full_like", () => {
it("should create a tensor filled with a number, matching the shape of the original", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]);
const result = full_like(t1, 10);
const target = new Tensor("float32", [10, 10, 10, 10], [2, 2]);
compare(result, target, 1e-3);
});
it("should create a boolean tensor with the same shape", () => {
const t2 = new Tensor("bool", [true, false], [2]);
const result = full_like(t2, true);
const target = new Tensor("bool", [true, true], [2]);
compare(result, target, 1e-3);
});
it("should create a bigint tensor with the same shape", () => {
const t3 = new Tensor("int64", [1n, 2n], [2]);
const result = full_like(t3, 123n);
const target = new Tensor("int64", [123n, 123n], [2]);
compare(result, target, 1e-3);
});
});
describe("rand", () => {
it("should create a tensor of random values between 0 and 1 with the given shape", () => {
const shape = [2, 2];
const random = rand(shape);
expect(random.type).toBe("float32");
expect(random.dims).toEqual(shape);
random.data.forEach((val) => {
expect(val).toBeGreaterThanOrEqual(0);
expect(val).toBeLessThan(1);
});
});
});
describe("to", () => {
it("float32 to int32 (number to number)", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]);
const target = new Tensor("int32", [1, 2, 3, 4, 5, 6], [2, 3]);
const t2 = t1.to("int32");
compare(t2, target);
});
it("float32 to int64 (number to bigint)", () => {
const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]);
const target = new Tensor("int64", [1n, 2n, 3n, 4n, 5n, 6n], [2, 3]);
const t2 = t1.to("int64");
compare(t2, target);
});
it("int64 to float32 (bigint to number)", () => {
const t1 = new Tensor("int64", [1n, 2n, 3n, 4n, 5n, 6n], [2, 3]);
const target = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]);
const t2 = t1.to("float32");
compare(t2, target);
});
it("int32 to uint32", () => {
const t1 = new Tensor("int32", [-1, 2, -3, 4, -5, 6], [2, 3]);
const target = new Tensor("uint32", [4294967295, 2, 4294967293, 4, 4294967291, 6], [2, 3]);
const t2 = t1.to("uint32");
compare(t2, target);
});
it("int16 to int8 (overflow)", () => {
const t1 = new Tensor("int16", [0, 1, 128, 256, 257, 512], [2, 3]);
const target = new Tensor("int8", [0, 1, -128, 0, 1, 0], [2, 3]);
const t2 = t1.to("int8");
compare(t2, target);
});
});
});
| transformers.js/tests/utils/tensor.test.js/0 | {
"file_path": "transformers.js/tests/utils/tensor.test.js",
"repo_id": "transformers.js",
"token_count": 9122
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Run benchmark using the `optimum-benchmark` library with some customization in `transformers`.
Assume we are under `transformers` root directory: (make sure the commits are valid commits)
```bash
python benchmark/benchmark.py --config-dir benchmark/config --config-name generation --commit=9b9c7f03da625b13643e99205c691fe046461724 --metrics=decode.latency.mean,per_token.latency.mean,per_token.throughput.value backend.model=google/gemma-2b benchmark.input_shapes.sequence_length=5,7 benchmark.input_shapes.batch_size=1,2 --multirun
```
"""
import argparse
import glob
import json
import os.path
import re
import tempfile
from contextlib import contextmanager
from pathlib import Path
from git import Repo
from huggingface_hub import HfApi
from optimum_benchmark import Benchmark
from optimum_benchmark_wrapper import main
PATH_TO_REPO = Path(__file__).parent.parent.resolve()
@contextmanager
def checkout_commit(repo: Repo, commit_id: str):
"""
Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit.
Args:
repo (`git.Repo`): A git repository (for instance the Transformers repo).
commit_id (`str`): The commit reference to checkout inside the context manager.
"""
current_head = repo.head.commit if repo.head.is_detached else repo.head.ref
try:
repo.git.checkout(commit_id)
yield
finally:
repo.git.checkout(current_head)
def summarize(run_dir, metrics, expand_metrics=False):
"""Produce a summary for each optimum-benchmark launched job's output directory found in `run_dir`.
Each summary's format is as follows (for `expand_metrics=False`):
```
{
"model": "google/gemma-2b",
"commit": "3cd6ed22e4d49219f300f5055e71e3929aba20d7",
"config": "benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5",
"metrics": {
"decode.latency.mean": 1.624666809082031,
"per_token.latency.mean": 0.012843788806628804,
"per_token.throughput.value": 77.85864553330948
}
}
```
"""
reports = glob.glob(os.path.join(run_dir, "**/benchmark_report.json"), recursive=True)
report_dirs = [str(Path(report).parent) for report in reports]
summaries = []
for report_dir in report_dirs:
commit = re.search(r"/commit=([^/]+)", report_dir).groups()[0]
if not os.path.isfile(os.path.join(report_dir, "benchmark.json")):
continue
benchmark = Benchmark.from_json(os.path.join(report_dir, "benchmark.json"))
report = benchmark.report
model = benchmark.config.backend["model"]
# This looks like `benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5`.
# (we rely on the usage of hydra's `${hydra.job.override_dirname}`.)
benchmark_name = re.sub(f"backend.model={model},*", "", report_dir)
benchmark_name = str(Path(benchmark_name).parts[-1])
if benchmark_name.startswith("commit="):
benchmark_name = benchmark.config.name
metrics_values = {}
# post-processing of report: show a few selected/important metrics
for metric in metrics:
keys = metric.split(".")
value = report.to_dict()
current = metrics_values
for key in keys:
# Avoid KeyError when a user-specified metric has a typo.
# TODO: Give warnings.
if key not in value:
continue
value = value[key]
if expand_metrics:
if isinstance(value, dict):
if key not in current:
current[key] = {}
current = current[key]
else:
current[key] = value
if not expand_metrics:
metrics_values[metric] = value
# show some config information
print(f"model: {model}")
print(f"commit: {commit}")
print(f"config: {benchmark_name}")
if len(metrics_values) > 0:
print("metrics:")
if expand_metrics:
print(metrics_values)
else:
for metric, value in metrics_values.items():
print(f" - {metric}: {value}")
print("-" * 80)
summary = {
"model": model,
"commit": commit,
"config": benchmark_name,
"metrics": metrics_values,
}
summaries.append(summary)
with open(os.path.join(report_dir, "summary.json"), "w") as fp:
json.dump(summary, fp, indent=4)
return summaries
def combine_summaries(summaries):
"""Combine a list of summary obtained from the function `summarize`.
The combined summary's format is as follows:
```
"google/gemma-2b": {
"benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5": {
"3cd6ed22e4d49219f300f5055e71e3929aba20d7": {
"metrics": {"decode.latency.mean": 1.624666809082031}
},
"c97ee28b117c0abe8e08891f402065e4df6d72aa": {
"metrics": {"decode.latency.mean": 1.6278163452148438}
}
},
"benchmark.input_shapes.batch_size=2,benchmark.input_shapes.sequence_length=5": {
"3cd6ed22e4d49219f300f5055e71e3929aba20d7": {
"metrics": {"decode.latency.mean": 1.6947791748046876}
},
"c97ee28b117c0abe8e08891f402065e4df6d72aa": {
"metrics": {
"decode.latency.mean": 1.6980519409179688}
}
}
}
```
"""
combined = {}
for summary in summaries:
model = summary["model"]
config = summary["config"]
commit = summary["commit"]
if model not in combined:
combined[model] = {}
if config not in combined[model]:
combined[model][config] = {}
if commit not in combined[model][config]:
combined[model][config][commit] = {"metrics": summary["metrics"]}
with open(os.path.join(exp_run_dir, "summary.json"), "w") as fp:
json.dump(combined, fp, indent=4)
print(json.dumps(combined, indent=4))
return combined
if __name__ == "__main__":
def list_str(values):
return values.split(",")
parser = argparse.ArgumentParser()
parser.add_argument("--config-dir", type=str, required=True, help="The path to the config directory.")
parser.add_argument("--config-name", type=str, required=True, help="The config name.")
# arguments specific to this wrapper for our own customization
parser.add_argument("--ensure_empty", type=bool, default=True, help="If to create a temporary directory.")
parser.add_argument(
"--commit",
type=list_str,
default="",
help="Comma-separated list of branch names and/or commit sha values on which the benchmark will run. If `diff` is specified, it will run on both the current head and the `main` branch.",
)
parser.add_argument("--metrics", type=str, help="The metrics to be included in the summary.")
parser.add_argument("--repo_id", type=str, default=None, help="The repository to which the file will be uploaded.")
parser.add_argument("--path_in_repo", type=str, default=None, help="Relative filepath in the repo.")
parser.add_argument("--token", type=str, default=None, help="A valid user access token (string).")
args, optimum_benchmark_args = parser.parse_known_args()
repo = Repo(PATH_TO_REPO)
metrics = [
"prefill.latency.mean",
"prefill.throughput.value",
"decode.latency.mean",
"decode.throughput.value",
"per_token.latency.mean",
"per_token.throughput.value",
]
if args.metrics is not None:
metrics = args.metrics.split(",")
# Get `backend.model` in a hacky way: We want to control the experiment flow manually.
models = [""]
for idx, arg in enumerate(optimum_benchmark_args):
if arg.startswith("backend.model="):
models = arg[len("backend.model=") :]
models = models.split(",")
break
optimum_benchmark_args = [arg for arg in optimum_benchmark_args if not arg.startswith("backend.model=")]
# Get the commit(s)
current_head = str(repo.head.commit) if repo.head.is_detached else str(repo.head.ref)
commits = [x for x in args.commit if x != ""]
if len(commits) == 0:
commits = [current_head]
elif len(commits) == 1 and commits[0] == "diff":
# compare to `main`
commits = ["main", current_head]
# Get the specified run directory
run_dir_arg_idx, run_dir = -1, None
sweep_dir_arg_idx, sweep_dir = -1, None
for idx, arg in enumerate(optimum_benchmark_args):
if arg.startswith("hydra.run.dir="):
run_dir = arg[len("hydra.run.dir=") :]
run_dir_arg_idx = idx
elif arg.startswith("hydra.sweep.dir="):
sweep_dir = arg[len("hydra.sweep.dir=") :]
sweep_dir_arg_idx = idx
exp_run_dir, arg_dix, arg_name = (
(sweep_dir, sweep_dir_arg_idx, "hydra.sweep.dir")
if "--multirun" in optimum_benchmark_args
else (run_dir, run_dir_arg_idx, "hydra.run.dir")
)
# TODO: not hardcoded
if exp_run_dir is None and args.ensure_empty:
exp_run_dir = "_benchmark"
if args.ensure_empty:
os.makedirs(exp_run_dir, exist_ok=True)
exp_run_dir = tempfile.mkdtemp(dir=exp_run_dir)
run_summaries = []
for commit in commits:
with checkout_commit(repo, commit):
commit = str(repo.head.commit)
commit_run_dir = exp_run_dir
if exp_run_dir is not None:
commit_run_dir = os.path.join(exp_run_dir, rf"commit\={commit}")
print(f"Run benchmark on commit: {commit}")
for model in models:
model_arg = [f"backend.model={model}"] if model != "" else []
dir_args = []
if commit_run_dir is not None:
if arg_dix > -1:
optimum_benchmark_args[arg_dix] = f"{arg_name}={commit_run_dir}"
else:
dir_args = [
f"hydra.sweep.dir={commit_run_dir}",
f"hydra.run.dir={commit_run_dir}/" + "${hydra.job.override_dirname}",
]
main(args.config_dir, args.config_name, model_arg + dir_args + optimum_benchmark_args)
if commit_run_dir is not None:
# Need to remove the `\` character
summaries = summarize(commit_run_dir.replace("\\", ""), metrics)
run_summaries.extend(summaries)
# aggregate the information across the commits
if exp_run_dir is not None:
with open(os.path.join(exp_run_dir, "summaries.json"), "w") as fp:
json.dump(run_summaries, fp, indent=4)
combined_summary = combine_summaries(run_summaries)
if args.repo_id is not None and args.path_in_repo is not None:
# Upload to Hub
api = HfApi()
api.upload_folder(
folder_path=exp_run_dir,
path_in_repo=args.path_in_repo,
repo_id=args.repo_id,
repo_type="dataset",
token=args.token,
)
| transformers/benchmark/benchmark.py/0 | {
"file_path": "transformers/benchmark/benchmark.py",
"repo_id": "transformers",
"token_count": 5440
} |
FROM python:3.9-slim
ENV PYTHONDONTWRITEBYTECODE=1
ARG REF=main
USER root
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git libgl1-mesa-glx libgl1 g++ tesseract-ocr
ENV UV_PYTHON=/usr/local/bin/python
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
RUN uv pip install --no-cache-dir --no-deps timm accelerate
RUN pip install -U --upgrade-strategy eager --no-cache-dir pytesseract python-Levenshtein opencv-python nltk
# RUN uv pip install --no-cache-dir natten==0.15.1+torch210cpu -f https://shi-labs.com/natten/wheels
RUN pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[testing, vision]" 'scikit-learn' 'torch-stft' 'nose' 'dataset'
# RUN git clone https://github.com/facebookresearch/detectron2.git
# RUN python3 -m pip install --no-cache-dir -e detectron2
RUN pip install 'git+https://github.com/facebookresearch/detectron2.git@92ae9f0b92aba5867824b4f12aa06a22a60a45d3'
RUN pip uninstall -y transformers
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
| transformers/docker/exotic-models.dockerfile/0 | {
"file_path": "transformers/docker/exotic-models.dockerfile",
"repo_id": "transformers",
"token_count": 468
} |
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-23-11.html#rel-23-11
FROM nvcr.io/nvidia/pytorch:23.11-py3
LABEL maintainer="Hugging Face"
ARG DEBIAN_FRONTEND=noninteractive
# Example: `cu102`, `cu113`, etc.
ARG CUDA='cu121'
RUN apt -y update
RUN apt install -y libaio-dev
RUN python3 -m pip install --no-cache-dir --upgrade pip
ARG REF=main
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
RUN python3 -m pip uninstall -y torch torchvision torchaudio
# Install **nightly** release PyTorch (flag `--pre`)
# (PyTorch must be installed before pre-compiling any DeepSpeed c++/cuda ops.)
# (https://www.deepspeed.ai/tutorials/advanced-install/#pre-install-deepspeed-ops)
RUN python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA
RUN python3 -m pip install --no-cache-dir ./transformers[deepspeed-testing]
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
# Uninstall `transformer-engine` shipped with the base image
RUN python3 -m pip uninstall -y transformer-engine
# Uninstall `torch-tensorrt` and `apex` shipped with the base image
RUN python3 -m pip uninstall -y torch-tensorrt apex
# Pre-build **nightly** release of DeepSpeed, so it would be ready for testing (otherwise, the 1st deepspeed test will timeout)
RUN python3 -m pip uninstall -y deepspeed
# This has to be run inside the GPU VMs running the tests. (So far, it fails here due to GPU checks during compilation.)
# Issue: https://github.com/deepspeedai/DeepSpeed/issues/2010
# RUN git clone https://github.com/deepspeedai/DeepSpeed && cd DeepSpeed && rm -rf build && \
# DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1
## For `torchdynamo` tests
## (see https://github.com/huggingface/transformers/pull/17765)
#RUN git clone https://github.com/pytorch/functorch
#RUN python3 -m pip install --no-cache-dir ./functorch[aot]
#RUN cd functorch && python3 setup.py develop
#
#RUN git clone https://github.com/pytorch/torchdynamo
#RUN python3 -m pip install -r ./torchdynamo/requirements.txt
#RUN cd torchdynamo && python3 setup.py develop
#
## install TensorRT
#RUN python3 -m pip install --no-cache-dir -U nvidia-pyindex
#RUN python3 -m pip install --no-cache-dir -U nvidia-tensorrt==8.2.4.2
#
## install torch_tensorrt (fx path)
#RUN git clone https://github.com/pytorch/TensorRT.git
#RUN cd TensorRT/py && python3 setup.py install --fx-only
# When installing in editable mode, `transformers` is not recognized as a package.
# this line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop
# Disable for now as deepspeed is not installed above. To be enabled once the issue is fixed.
# RUN python3 -c "from deepspeed.launcher.runner import main"
| transformers/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile/0 | {
"file_path": "transformers/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile",
"repo_id": "transformers",
"token_count": 1032
} |
# Load pretrained instances with an AutoClass
With so many different Transformer architectures, it can be challenging to create one for your checkpoint. As part of the 🤗 Transformers core philosophy of making the library easy, simple and flexible to use, an `AutoClass` automatically infers and loads the correct architecture from a given checkpoint. The `from_pretrained()` method lets you quickly load a pretrained model for any architecture so you don't have to devote time and resources to training a model from scratch. Producing this kind of checkpoint-agnostic code means that if your code works for one checkpoint, it will work for another - as long as it was trained for a similar task - even if the architecture is different.
Remember, architecture refers to the skeleton of the model and checkpoints are the weights for a given architecture. For example, [BERT](https://huggingface.co/google-bert/bert-base-uncased) is an architecture, while `google-bert/bert-base-uncased` is a checkpoint. Model is a general term that can mean either architecture or checkpoint.
In this tutorial, you will learn how to:
* Load a pretrained tokenizer
* Load a pretrained image processor
* Load a pretrained feature extractor
* Load a pretrained processor
* Load a pretrained model
* Load a model as a backbone
## AutoTokenizer
Nearly every NLP task begins with a tokenizer. A tokenizer converts text into a form that the model can process. Load a tokenizer with [`AutoTokenizer.from_pretrained`]:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
```
Then tokenize your input as shown below:
```py
>>> sequence = "In a hole in the ground there lived a hobbit."
>>> print(tokenizer(sequence))
{'input_ids': [101, 1999, 1037, 4920, 1999, 1996, 2598, 2045, 2973, 1037, 7570, 10322, 4183, 1012, 102],
'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```
## AutoImageProcessor
For vision tasks, an image processor processes the image into the correct input format.
```py
>>> from transformers import AutoImageProcessor
>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
```
## AutoBackbone
<div style="text-align: center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Swin%20Stages.png">
<figcaption class="mt-2 text-center text-sm text-gray-500">A diagram of the stages of the Swin model.</figcaption>
</div>
The [`AutoBackbone`] class lets you use pretrained models as backbones to get feature maps from different stages of the backbone. You should specify one of the following parameters in [`~PretrainedConfig.from_pretrained`]:
* `out_indices` is the index of the layer you'd like to get the feature map from
* `out_features` is the name of the layer you'd like to get the feature map from
These parameters can be used interchangeably, but if you use both, make sure they're aligned with each other! If you don't pass either of these parameters, the backbone returns the feature map from the last layer.
<div style="text-align: center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Swin%20Stage%201.png">
<figcaption class="mt-2 text-center text-sm text-gray-500">A feature map from the first stage of the backbone.</figcaption>
</div>
عÙ٠سؚÙ٠اÙÙ
Ø«Ø§ÙØ ÙÙ Ø§ÙØ±Ø³Ù
Ø§ÙØªØ®Ø·ÙØ·Ù Ø£Ø¹ÙØ§ÙØ ÙØ¥Ø±Ø¬Ø§Ø¹ Ø®Ø±ÙØ·Ø© اÙÙ
ÙØ²Ø§Øª Ù
٠اÙÙ
رØÙØ© Ø§ÙØ£ÙÙÙ Ù
Ù Ø§ÙØ¹Ù
ÙØ¯ اÙÙÙØ±Ù SwinØ ÙÙ
ÙÙ٠تعÙÙÙ `out_indices=(1,)`:
```py
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
>>> model = AutoBackbone.from_pretrained("microsoft/swin-tiny-patch4-window7-224", out_indices=(1,))
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
```
Now you can access the `feature_maps` object from the first stage of the backbone:
```py
>>> list(feature_maps[0].shape)
[1, 96, 56, 56]
```
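The same stage can usually also be selected by name through `out_features` instead of `out_indices`. The following is an illustrative sketch rather than part of the original tutorial; the stage name `"stage1"` is an assumption for this checkpoint and can be checked against `model.config.stage_names`:
```py
>>> from transformers import AutoBackbone
>>> # "stage1" is an assumed stage name; inspect model.config.stage_names to confirm what this checkpoint exposes.
>>> model = AutoBackbone.from_pretrained("microsoft/swin-tiny-patch4-window7-224", out_features=["stage1"])
>>> model.config.out_features
['stage1']
```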
## AutoFeatureExtractor
For audio tasks, a feature extractor processes the audio signal into the correct input format.
Load a feature extractor with [`AutoFeatureExtractor.from_pretrained`]:
```py
>>> from transformers import AutoFeatureExtractor
>>> feature_extractor = AutoFeatureExtractor.from_pretrained(
... "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition"
... )
```
## AutoProcessor
Multimodal tasks require a processor that combines two types of preprocessing tools. For example, the [LayoutLMV2](model_doc/layoutlmv2) model requires an image processor to handle images and a tokenizer to handle text; a processor combines both of them.
Load a processor with [`AutoProcessor.from_pretrained`]:
```py
>>> from transformers import AutoProcessor
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
```
## AutoModel
<frameworkcontent>
<pt>
The `AutoModelFor` classes let you load a pretrained model for a given task (see [here](model_doc/auto) for a complete list of available tasks). For example, load a model for sequence classification with [`AutoModelForSequenceClassification.from_pretrained`]:
```py
>>> from transformers import AutoModelForSequenceClassification
>>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
```
Easily reuse the same checkpoint to load an architecture for a different task:
```py
>>> from transformers import AutoModelForTokenClassification
>>> model = AutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased")
```
<Tip warning={true}>
For PyTorch models, the `from_pretrained()` method uses `torch.load()`, which internally uses `pickle` and is known to be insecure. In general, never load a model that could have come from an untrusted source or that could have been tampered with. This security risk is partially mitigated for public models hosted on the Hugging Face Hub, which are [scanned for malware](https://huggingface.co/docs/hub/security-malware) at each commit. See the [Hub documentation](https://huggingface.co/docs/hub/security) for best practices like [signed commit verification](https://huggingface.co/docs/hub/security-gpg#signing-commits-with-gpg) with GPG.
TensorFlow and Flax checkpoints are not affected, and can be loaded within PyTorch architectures using the `from_tf` and `from_flax` kwargs of the `from_pretrained` method to circumvent this issue.
</Tip>
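As a minimal sketch of the `from_tf` escape hatch mentioned in the tip above (not part of the original tutorial; the path below is a placeholder for a local TensorFlow checkpoint, not a real repository):
```py
>>> from transformers import AutoModelForSequenceClassification
>>> # "path/to/tf-checkpoint" is a placeholder; from_tf=True loads TensorFlow weights into the PyTorch class.
>>> model = AutoModelForSequenceClassification.from_pretrained("path/to/tf-checkpoint", from_tf=True)
```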
ؚ؎Ù٠عاÙ
Ø ÙÙØµÙ ؚاستخداÙ
ÙØŠØ© `AutoTokenizer` ÙÙØŠØ© `AutoModelFor` ÙØªØÙ
ÙÙ Ù
Ø«ÙÙØ§Øª Ù
ÙØ¯Ø±ØšØ© Ù
سؚÙÙØ§ Ù
٠اÙÙÙ
اذج. Ø³ÙØ³Ø§Ø¹Ø¯Ù ÙØ°Ø§ Ù٠تØÙ
ÙÙ Ø§ÙØšÙÙØ© Ø§ÙØµØÙØØ© ÙÙ ÙÙ Ù
رة. ÙÙ Ø§ÙØšØ±ÙاÙ
ج Ø§ÙØªØ¹ÙÙÙ
Ù Ø§ÙØªØ§ÙÙØ تعر٠عÙÙ ÙÙÙÙØ© استخداÙ
اÙÙ
ØÙ٠اÙÙØºÙÙ ÙÙ
Ø¹Ø§ÙØ¬ Ø§ÙØµÙر ÙÙ
ستخرج اÙÙ
ÙØ²Ø§Øª ÙØ§ÙÙ
Ø¹Ø§ÙØ¬ Ø§ÙØ°Ù تÙ
تØÙ
ÙÙÙ ØØ¯ÙØ«ÙØ§ ÙÙ
Ø¹Ø§ÙØ¬Ø© Ù
جÙ
ÙØ¹Ø© ØšÙØ§Ùات ÙÙØ¶ØšØ· Ø§ÙØ¯ÙÙÙ.
</pt>
<tf>
Finally, the `TFAutoModelFor` classes let you load a pretrained model for a given task (see [here](model_doc/auto) for a complete list of available tasks). For example, load a model for sequence classification with [`TFAutoModelForSequenceClassification.from_pretrained`]:
```py
>>> from transformers import TFAutoModelForSequenceClassification
>>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
```
Easily reuse the same checkpoint to load an architecture for a different task:
```py
>>> from transformers import TFAutoModelForTokenClassification
>>> model = TFAutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased")
```
ؚ؎Ù٠عاÙ
Ø ÙÙØµÙ ؚاستخداÙ
ÙØŠØ© `AutoTokenizer` ÙÙØŠØ© `TFAutoModelFor` ÙØªØÙ
ÙÙ ÙØ³Ø® ÙÙÙ
اذج Ù
ÙØ¯Ø±ØšØ© Ù
سؚÙÙØ§. Ø³ÙØ³Ø§Ø¹Ø¯Ù ÙØ°Ø§ Ù٠تØÙ
ÙÙ Ø§ÙØšÙÙØ© Ø§ÙØµØÙØØ© ÙÙ ÙÙ Ù
رة. ÙÙ Ø§ÙØšØ±ÙاÙ
ج Ø§ÙØªØ¹ÙÙÙ
Ù Ø§ÙØªØ§ÙÙØ ستتعر٠عÙÙ ÙÙÙÙØ© استخداÙ
اÙÙ
ÙØ¬Ø²ÙØŠ اÙÙØºÙÙ ÙÙ
Ø¹Ø§ÙØ¬ Ø§ÙØµÙر ÙÙ
ستخرج اÙÙ
ÙØ²Ø§Øª ÙØ§ÙÙ
Ø¹Ø§ÙØ¬ Ø§ÙØ°Ù تÙ
تØÙ
ÙÙÙ ØØ¯ÙØ«ÙØ§ ÙÙ
Ø¹Ø§ÙØ¬Ø© Ù
جÙ
ÙØ¹Ø© ØšÙØ§Ùات ÙÙØ¶ØšØ· Ø§ÙØ¯ÙÙÙ.
</tf>
</frameworkcontent>
| transformers/docs/source/ar/autoclass_tutorial.md/0 | {
"file_path": "transformers/docs/source/ar/autoclass_tutorial.md",
"repo_id": "transformers",
"token_count": 5440
} |
# ؎ار٠ÙÙ
ÙØ°Ø¬Ù Ù
ع Ø§ÙØ¹Ø§ÙÙ
Ø£ØžÙØ±Øª آخر درسÙ٠تعÙÙÙ
ÙÙÙ ÙÙÙÙØ© ضؚط ÙÙ
ÙØ°Ø¬ ØšØ¯ÙØ© ؚاستخداÙ
PyTorch Ù Keras Ù ð€ Accelerate ÙØ¹Ù
ÙÙØ§Øª Ø§ÙØªÙÙØŠØ© اÙÙ
ÙØ²Ø¹Ø©. ÙØ§ÙØ®Ø·ÙØ© Ø§ÙØªØ§ÙÙØ© ÙÙ Ù
ØŽØ§Ø±ÙØ© ÙÙ
ÙØ°Ø¬Ù Ù
ع اÙÙ
جتÙ
ع! ÙÙ Hugging FaceØ ÙØ€Ù
٠ؚاÙÙ
ØŽØ§Ø±ÙØ© اÙÙ
ÙØªÙØØ© ÙÙÙ
Ø¹Ø±ÙØ© ÙØ§ÙÙ
ÙØ§Ø±Ø¯ ÙØªÙ
ÙÙÙ Ø§ÙØ¬Ù
ÙØ¹ Ù
Ù Ø§ÙØ§Ø³ØªÙادة Ù
Ù Ø§ÙØ°Ùاء Ø§ÙØ§ØµØ·ÙاعÙ. ÙÙØŽØ¬Ø¹Ù عÙÙ Ù
ØŽØ§Ø±ÙØ© ÙÙ
ÙØ°Ø¬Ù Ù
ع اÙÙ
جتÙ
ع ÙÙ
ساعدة Ø§ÙØ¢Ø®Ø±Ù٠عÙ٠تÙÙÙØ± اÙÙÙØª ÙØ§ÙÙ
ÙØ§Ø±Ø¯.
ÙÙ ÙØ°Ø§ Ø§ÙØ¯Ø±Ø³Ø ستتعÙÙ
طرÙÙØªÙÙ ÙÙ
ØŽØ§Ø±ÙØ© ÙÙ
ÙØ°Ø¬Ù اÙÙ
درؚ Ø£Ù Ù
Ø¶ØšÙØ· عÙÙ Ù
ÙØµØ© [Model Hub](https://huggingface.co/models):
- Ø±ÙØ¹ Ù
ÙÙØ§ØªÙ Ø¥ÙÙ Ù
ÙØµØ© Hub Ù
ؚا؎رة ؚاستخداÙ
اÙÙÙØ¯ Ø§ÙØšØ±Ù
جÙ.
- ÙÙ
ØšØ³ØØš ÙØ¥ÙÙØ§Øª Ù
ÙÙØ§ØªÙ Ø¥ÙÙ Hub ؚاستخداÙ
اÙÙØ§Ø¬ÙØ© web.
<iframe width="560" height="315" src="https://www.youtube.com/embed/XvSGPZFEjDY" title="Ù
؎غ٠ÙÙØ¯ÙÙ YouTube"
frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope;
picture-in-picture" allowfullscreen></iframe>
<Tip>
ÙÙ
ØŽØ§Ø±ÙØ© ÙÙ
ÙØ°Ø¬ Ù
ع اÙÙ
جتÙ
Ø¹Ø ØªØØªØ§Ø¬ Ø¥ÙÙ ØØ³Ø§Øš عÙÙ [huggingface.co](https://huggingface.co/join). ÙÙ
ÙÙÙ Ø£ÙØ¶Ùا Ø§ÙØ§ÙضÙ
اÙ
Ø¥ÙÙ Ù
ÙØžÙ
Ø© Ù
ÙØ¬Ùدة Ø£Ù Ø¥ÙØŽØ§Ø¡ Ù
ÙØžÙ
Ø© Ø¬Ø¯ÙØ¯Ø©.
</Tip>
## Repository features
Each repository on the Model Hub behaves like a typical GitHub repository. Our repositories offer versioning, commit history, and the ability to visualize differences between versions.
The Model Hub's built-in versioning is based on git and [git-lfs](https://git-lfs.github.com/). In other words, you can treat each model as its own repository, enabling greater access control and scalability. Version control allows *revisions*, a method for pinning a specific version of a model with a commit hash, tag, or branch.
As a result, you can load a specific model version with the `revision` parameter:
```py
>>> model = AutoModel.from_pretrained(
... "julien-c/EsperBERTo-small", revision="4c77982"  # tag name, or branch name, or commit hash
... )
```
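To see which revision values (branches and tags) a repository actually exposes, the `huggingface_hub` client can list them. This is an illustrative sketch rather than part of the original guide; the repository id simply reuses the example above:
```py
>>> from huggingface_hub import HfApi
>>> refs = HfApi().list_repo_refs("julien-c/EsperBERTo-small")
>>> [branch.name for branch in refs.branches]  # branch names, e.g. ['main']
>>> [tag.name for tag in refs.tags]  # tag names, possibly empty
```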
It's also easy to edit a file in a repository, and you can view the commit history as well as the differences between versions:

## Ø§ÙØ¥Ø¹Ø¯Ø§Ø¯
ÙØšÙ Ù
ØŽØ§Ø±ÙØ© ÙÙ
ÙØ°Ø¬ عÙÙ HubØ Ø³ØªØØªØ§Ø¬ Ø¥ÙÙ ØšÙØ§Ùات اعتÙ
اد ØØ³Ø§Øš Hugging Face Ø§ÙØ®Ø§ØµØ© ØšÙ. إذا ÙÙØª تستخدÙ
Ù
ÙØµØ© Ø§ÙØ£ÙاÙ
Ø±Ø ÙÙÙ
ؚت؎غÙÙ Ø§ÙØ£Ù
ر Ø§ÙØªØ§ÙÙ ÙÙ ØšÙØŠØ© Ø§ÙØªØ±Ø§Ø¶ÙØ© ØÙØ« تÙ
ØªØ«ØšÙØª ð€ Transformers. سÙÙÙÙ
ÙØ°Ø§ Ø§ÙØ£Ù
ر ؚتخزÙ٠رÙ
ز Ø§ÙØ¯Ø®ÙÙ Ø§ÙØ®Ø§Øµ ØšÙ ÙÙ Ù
Ø¬ÙØ¯ تخزÙ٠اÙÙ
Ø€ÙØª ÙÙ Hugging Face (`~/.cache/` ؚ؎ÙÙ Ø§ÙØªØ±Ø§Ø¶Ù):
```bash
huggingface-cli login
```
If you are using a notebook like Jupyter or Colaboratory, make sure you have the [`huggingface_hub`](https://huggingface.co/docs/hub/adding-a-library) library installed. This library allows you to interact with the Hub programmatically.
```bash
pip install huggingface_hub
```
Then use `notebook_login` to sign in to the Hub, and follow the link [here](https://huggingface.co/settings/token) to generate a token to log in with:
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
## Convert a model for all frameworks
To ensure your model can be used by someone working with a different framework, we recommend you convert and upload your model with both PyTorch and TensorFlow checkpoints. While users can still load your model from a different framework if you skip this step, it will be slower because 🤗 Transformers will need to convert the checkpoint on-the-fly.
Converting a checkpoint for another framework is easy. Make sure you have PyTorch and TensorFlow installed (see [here](installation) for installation instructions), and then find the specific model for your task in the other framework.
<frameworkcontent>
<pt>
Specify `from_tf=True` to convert a checkpoint from TensorFlow to PyTorch:
```py
>>> pt_model = DistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_tf=True)
>>> pt_model.save_pretrained("path/to/awesome-name-you-picked")
```
</pt>
<tf>
Specify `from_pt=True` to convert a checkpoint from PyTorch to TensorFlow:
```py
>>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_pt=True)
```
Then you can save your new TensorFlow model with its new checkpoint:
```py
>>> tf_model.save_pretrained("path/to/awesome-name-you-picked")
```
</tf>
<jax>
If a model is available in Flax, you can also convert a checkpoint from PyTorch to Flax:
```py
>>> flax_model = FlaxDistilBertForSequenceClassification.from_pretrained(
... "path/to/awesome-name-you-picked", from_pt=True
... )
```
</jax>
</frameworkcontent>
## Push a model during training
<frameworkcontent>
<pt>
<Youtube id="Z1-XMy-GNLQ"/>
Sharing a model to the Hub is as simple as adding an extra parameter or callback. Remember from the [fine-tuning tutorial](training) that the [`TrainingArguments`] class is where you specify hyperparameters and additional training options. One of these training options includes the ability to push a model directly to the Hub. Set `push_to_hub=True` in your [`TrainingArguments`]:
```py
>>> training_args = TrainingArguments(output_dir="my-awesome-model", push_to_hub=True)
```
Pass your training arguments as usual to [`Trainer`]:
```py
>>> trainer = Trainer(
... model=model,
... args=training_args,
... train_dataset=small_train_dataset,
... eval_dataset=small_eval_dataset,
... compute_metrics=compute_metrics,
... )
```
After you fine-tune your model, call the [`~transformers.Trainer.push_to_hub`] method on [`Trainer`] to push the trained model to the Hub. 🤗 Transformers will even automatically add the training hyperparameters, training results, and framework versions to your model card!
```py
>>> trainer.push_to_hub()
```
</pt>
<tf>
؎ار٠ÙÙ
ÙØ°Ø¬Ùا عÙÙ Hub ؚاستخداÙ
[`PushToHubCallback`]. ÙÙ Ø¯Ø§ÙØ© [`PushToHubCallback`], أضÙ:
- دÙÙ٠إخراج ÙÙÙ
ÙØ°Ø¬Ù.
- Ù
ÙØ¬Ø²ÙØŠ اÙÙØºÙÙ.
- `hub_model_id`Ø ÙØ§Ùذ٠Ù٠اسÙ
Ù
ستخدÙ
Hub ÙØ§Ø³Ù
اÙÙÙ
ÙØ°Ø¬ Ø§ÙØ®Ø§Øµ ØšÙ.
```py
>>> from transformers import PushToHubCallback
>>> push_to_hub_callback = PushToHubCallback(
... output_dir="./your_model_save_path", tokenizer=tokenizer, hub_model_id="your-username/my-awesome-model"
... )
```
Add the callback to [`fit`](https://keras.io/api/models/model_training_apis/), and 🤗 Transformers will push the trained model to the Hub:
```py
>>> model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=3, callbacks=push_to_hub_callback)
```
</tf>
</frameworkcontent>
## Use the `push_to_hub` function
You can also call `push_to_hub` directly on your model to upload it to the Hub.
Specify your model name in `push_to_hub`:
```py
>>> pt_model.push_to_hub("my-awesome-model")
```
This creates a repository under your username with the model name `my-awesome-model`. Users can now load your model with the `from_pretrained` function:
```py
>>> from transformers import AutoModel
>>> model = AutoModel.from_pretrained("your_username/my-awesome-model")
```
If you belong to an organization and want to push your model under the organization name instead, just add it to the `repo_id`:
```py
>>> pt_model.push_to_hub("my-awesome-org/my-awesome-model")
```
The `push_to_hub` function can also be used to add other files to a model repository. For example, add a tokenizer to a model repository:
```py
>>> tokenizer.push_to_hub("my-awesome-model")
```
أ٠رؚÙ
ا ØªØ±ÙØ¯ Ø¥Ø¶Ø§ÙØ© إصدار TensorFlow Ù
Ù ÙÙ
ÙØ°Ø¬ PyTorch اÙÙ
Ø¶ØšÙØ·:
```py
>>> tf_model.push_to_hub("my-awesome-model")
```
Now when you navigate to your Hugging Face profile, you should see your newly created model repository. Clicking on the **Files** tab will display all the files you've uploaded to the repository.
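If you would rather check the uploaded files programmatically than in the browser, the `huggingface_hub` client can list them. This is an illustrative sketch, not part of the original guide; the repository id is a placeholder:
```py
>>> from huggingface_hub import HfApi
>>> # "your_username/my-awesome-model" is a placeholder repository id.
>>> files = HfApi().list_repo_files("your_username/my-awesome-model")
>>> files  # a list of filenames, e.g. ['.gitattributes', 'config.json', ...]
```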
For more details on how to create and upload files to a repository, refer to the Hub documentation [here](https://huggingface.co/docs/hub/how-to-upstream).
## Ø§ÙØªØÙ
Ù٠ؚاستخداÙ
اÙÙØ§Ø¬ÙØ© web
ÙÙ
ÙÙ ÙÙÙ
ستخدÙ
ÙÙ Ø§ÙØ°ÙÙ ÙÙØ¶ÙÙÙ ÙÙØ¬ عدÙ
Ø§ÙØªØ±Ù
ÙØ² تØÙ
ÙÙ ÙÙ
ÙØ°Ø¬ Ù
Ù Ø®ÙØ§Ù ÙØ§Ø¬ÙØ© Hub web. ÙÙ
ØšØ²ÙØ§Ø±Ø© [huggingface.co/new](https://huggingface.co/new) ÙØ¥Ù؎اء Ù
Ø³ØªÙØ¯Ø¹ Ø¬Ø¯ÙØ¯:

From here, add some information about your model:
- Select the **owner** of the repository. This can be yourself or any of the organizations you belong to.
- Pick a name for your model, which will also be the repository name.
- Choose whether your model is public or private.
- Specify the license usage for your model.
Now click on the **Files** tab and click on the **Add file** button to upload a new file to your repository. Then drag-and-drop the file to upload and add a commit message.

## Add a model card
To make sure users understand your model's capabilities, limitations, potential biases, and ethical considerations, please add a model card to your repository. The model card is defined in the `README.md` file. You can add a model card in either of the following ways (a scripted sketch follows the list):
* Manually creating and uploading a `README.md` file.
* Clicking on the **Edit model card** button in your model repository.
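As a minimal scripted sketch of the first option, you can also build and push a `README.md` with the `huggingface_hub` library; the card content and repository id below are placeholders.
```py
>>> from huggingface_hub import ModelCard
>>> content = "---\nlicense: apache-2.0\n---\n\n# my-awesome-model\n\nA short description of the model, its training data, intended uses, and limitations.\n"
>>> card = ModelCard(content)
>>> card.push_to_hub("your_username/my-awesome-model")  # doctest: +SKIP
```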
Take a look at the DistilBert [model card](https://huggingface.co/distilbert/distilbert-base-uncased) for a good example of the type of information a model card should include. For more details about other options you can control in the `README.md` file, such as a model's carbon footprint or widget examples, refer to the documentation [here](https://huggingface.co/docs/hub/models-cards). | transformers/docs/source/ar/model_sharing.md/0 | {
"file_path": "transformers/docs/source/ar/model_sharing.md",
"repo_id": "transformers",
"token_count": 6706
} |
# What 🤗 Transformers can do
🤗 Transformers is a library of pretrained state-of-the-art models for natural language processing (NLP), computer vision, and audio and speech processing tasks. Not only does the library contain Transformer models, it also includes non-Transformer models such as modern convolutional networks for computer vision tasks. If you look at some of the most popular consumer products today, like smartphones, apps, and televisions, odds are that some kind of deep learning technology is behind them. Want to remove a background object from a picture taken by your smartphone? This is an example of a panoptic segmentation task (don't worry if you don't know what this means yet, we'll describe it in the following sections!).
This page provides an overview of the different speech and audio, computer vision, and NLP tasks that can be solved with the 🤗 Transformers library in just three lines of code!
## Audio
Audio and speech processing tasks are a little different from the other modalities, mainly because audio as an input is a continuous signal. Unlike text, a raw audio waveform can't be neatly split into discrete chunks the way a sentence can be divided into words. To get around this, the raw audio signal is typically sampled at regular time intervals. The more samples you take within an interval, the higher the sampling rate, and the more closely the audio resembles the original audio source.
Previous approaches preprocessed the audio to extract useful features from it. It is now more common to start audio and speech processing tasks by directly feeding the raw audio waveform to a feature encoder to extract an audio representation. This simplifies the preprocessing step and lets the model learn the most essential features.
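As a small sketch of that idea, the snippet below passes a raw waveform straight to a pretrained model's feature extractor; the one-second array of zeros at a 16 kHz sampling rate simply stands in for a real recording.
```py
>>> import numpy as np
>>> from transformers import AutoFeatureExtractor
>>> raw_waveform = np.zeros(16000, dtype=np.float32)  # one second of "audio" sampled at 16 kHz
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
>>> inputs = feature_extractor(raw_waveform, sampling_rate=16000, return_tensors="np")
>>> inputs["input_values"].shape
(1, 16000)
```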
### Audio classification
Audio classification is a task that labels audio data from a predefined set of classes. It is a broad category with many specific applications, some of which include:
* acoustic scene classification: label audio with a scene label ("office", "beach", "stadium")
* acoustic event detection: label audio with a sound event label ("car horn", "whale calling", "glass breaking")
* tagging: label audio that contains multiple sounds (birdsongs, speaker identification in a meeting)
* music classification: label music with a genre label ("metal", "hip-hop", "country")
```py
>>> from transformers import pipeline
>>> classifier = pipeline(task="audio-classification", model="superb/hubert-base-superb-er")
>>> preds = classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> preds
[{'score': 0.4532, 'label': 'hap'},
{'score': 0.3622, 'label': 'sad'},
{'score': 0.0943, 'label': 'neu'},
{'score': 0.0903, 'label': 'ang'}]
```
### Automatic speech recognition
Automatic speech recognition (ASR) transcribes speech into text. It is one of the most common audio tasks, due in part to speech being such a natural form of human communication. Today, ASR systems are embedded in "smart" technology products like speakers, phones, and cars. We can ask our virtual assistants to play music, set reminders, and tell us the weather.
One of the key challenges Transformer architectures have helped with is low-resource languages. By pretraining on large amounts of speech data, fine-tuning the model on only one hour of labeled speech data in a low-resource language can still produce high-quality results compared to previous ASR systems trained on 100x more labeled data.
```py
>>> from transformers import pipeline
>>> transcriber = pipeline(task="automatic-speech-recognition", model="openai/whisper-small")
>>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'}
```
## Computer vision
One of the first successful computer vision tasks was recognizing images of zip code numbers using a [convolutional neural network (CNN)](glossary#convolution). An image is composed of pixels, and each pixel has a numerical value. This makes it easy to represent an image as a matrix of pixel values, and each particular combination of pixel values describes the colors of an image.
Two general ways computer vision tasks can be solved are:
1. Use convolutions to learn the hierarchical features of an image, from low-level features up to high-level abstract things.
2. Split an image into patches and use a Transformer to gradually learn how each image patch relates to the others to form the image (a minimal patching sketch follows this list). Unlike the bottom-up approach favored by a CNN, this is a bit like starting out with a blurry image and then gradually bringing it into focus.
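As a rough sketch of the second approach (only the patching step, not a full model), a 224x224 image can be cut into 196 patches of 16x16 pixels that a Transformer would then treat as a sequence:
```py
>>> import torch
>>> pixel_values = torch.randn(1, 3, 224, 224)  # a dummy RGB image batch
>>> patch_size = 16
>>> patches = pixel_values.unfold(2, patch_size, patch_size).unfold(3, patch_size, patch_size)
>>> patches = patches.contiguous().view(1, 3, -1, patch_size, patch_size).permute(0, 2, 1, 3, 4)
>>> patches.shape  # 196 patches, each 3x16x16
torch.Size([1, 196, 3, 16, 16])
```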
### Image classification
Image classification labels an entire image from a predefined set of classes. Like most classification tasks, there are many practical use cases for image classification, some of which include:
* healthcare: label medical images to detect disease or monitor patient health
* environment: label satellite images to monitor deforestation, inform wildland management, or detect wildfires
* agriculture: label images of crops to monitor plant health, or satellite images for land use monitoring
* ecology: label images of animal or plant species to monitor wildlife populations or track endangered species
```py
>>> from transformers import pipeline
>>> classifier = pipeline(task="image-classification")
>>> preds = classifier(
... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> print(*preds, sep="\n")
{'score': 0.4335, 'label': 'lynx, catamount'}
{'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}
{'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}
{'score': 0.0239, 'label': 'Egyptian cat'}
{'score': 0.0229, 'label': 'tiger cat'}
```
### Object detection
Unlike image classification, object detection identifies multiple objects within an image along with their positions in the image (defined by a bounding box). Some example applications of object detection include:
* self-driving vehicles: detect everyday traffic objects such as other vehicles, pedestrians, and traffic lights
* remote sensing: disaster monitoring, urban planning, and weather forecasting
* defect detection: detect cracks or structural damage in buildings, and manufacturing defects
```py
>>> from transformers import pipeline
>>> detector = pipeline(task="object-detection")
>>> preds = detector(
... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"], "box": pred["box"]} for pred in preds]
>>> preds
[{'score': 0.9865,
'label': 'cat',
'box': {'xmin': 178, 'ymin': 154, 'xmax': 882, 'ymax': 598}}]
```
### Image segmentation
Image segmentation is a pixel-level task that assigns every pixel in an image to a class. It differs from object detection, which uses bounding boxes to label and predict objects in an image, because segmentation is more granular: it can detect objects at the pixel level. There are several types of image segmentation:
* instance segmentation: in addition to labeling the class of an object, it also labels each distinct instance of an object ("dog-1", "dog-2")
* panoptic segmentation: a combination of semantic and instance segmentation; it labels each pixel with a semantic class **and** each distinct instance of an object
Segmentation tasks are helpful in self-driving vehicles to create a pixel-level map of the world around them so they can navigate safely around pedestrians and other vehicles. They are also useful for medical imaging, where the task's finer granularity can help identify abnormal cells or organ features. Image segmentation can also be used in e-commerce to virtually try on clothes, or to create augmented reality experiences by overlaying objects in the real world through your camera.
```py
>>> from transformers import pipeline
>>> segmenter = pipeline(task="image-segmentation")
>>> preds = segmenter(
... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> print(*preds, sep="\n")
{'score': 0.9879, 'label': 'LABEL_184'}
{'score': 0.9973, 'label': 'snow'}
{'score': 0.9972, 'label': 'cat'}
```
### Depth estimation
Depth estimation predicts the distance of each pixel in an image from the camera. This computer vision task is especially important for scene understanding and reconstruction. For example, in self-driving cars, vehicles need to understand how far away objects like pedestrians, traffic signs, and other vehicles are in order to avoid obstacles and collisions. Depth information also helps construct 3D representations from 2D images and can be used to create high-quality 3D representations of biological structures or buildings.
There are two approaches to depth estimation:
* stereo: depths are estimated by comparing two images of the same scene taken from slightly different angles
* monocular: depths are estimated from a single image
```py
>>> from transformers import pipeline
>>> depth_estimator = pipeline(task="depth-estimation")
>>> preds = depth_estimator(
... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
```
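The pipeline returns a dictionary; in current versions it typically holds the raw `predicted_depth` tensor and a ready-to-use `depth` PIL image, although the exact keys may vary between releases.
```py
>>> depth_image = preds["depth"]              # PIL image with the estimated depth map
>>> depth_tensor = preds["predicted_depth"]   # raw model output as a tensor
>>> depth_image.save("pipeline-cat-chonk-depth.png")  # doctest: +SKIP
```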
## Natural language processing
NLP tasks are among the most common types of tasks because text is such a natural way for us to communicate. But before a model can understand text, the text first needs to be converted into a numerical form. This means splitting a sequence of text into separate words or subwords (tokens) and then converting these tokens into numbers. As a result, a sequence of text can be represented as a sequence of numbers, and once you have a sequence of numbers, it can be input into a model to solve all sorts of NLP tasks!
### Text classification
Like classification tasks in any modality, text classification labels a sequence of text (which can be a sentence, a paragraph, or a document) from a predefined set of classes. There are many practical applications for text classification, some of which include:
* sentiment analysis: label text according to some polarity like `positive` or `negative`, which can inform and support decision-making in fields like politics, finance, and marketing
* content classification: label text according to some topic to help organize and filter information in news and social media feeds (`weather`, `sports`, `finance`, etc.)
```py
>>> from transformers import pipeline
>>> classifier = pipeline(task="sentiment-analysis")
>>> preds = classifier("Hugging Face is the best thing since sliced bread!")
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> preds
[{'score': 0.9991, 'label': 'POSITIVE'}]
```
### Token classification
In any NLP task, text is preprocessed by separating the sequence of text into individual words or subwords known as [tokens](glossary#token). Token classification assigns each token a label from a predefined set of classes.
Two common types of token classification are:
* named entity recognition (NER): label a token according to an entity category such as organization, person, location, or date. NER is especially popular in biomedical settings, where it can label genes, proteins, and drug names.
* part-of-speech tagging (POS): label a token according to its part of speech, such as noun, verb, or adjective. POS is useful for helping translation systems understand how two identically spelled words differ grammatically (for example, the same word used as a noun versus as a verb).
```py
>>> from transformers import pipeline
>>> classifier = pipeline(task="ner")
>>> preds = classifier("Hugging Face is a French company based in New York City.")
>>> preds = [
... {
... "entity": pred["entity"],
... "score": round(pred["score"], 4),
... "index": pred["index"],
... "word": pred["word"],
... "start": pred["start"],
... "end": pred["end"],
... }
... for pred in preds
... ]
>>> print(*preds, sep="\n")
{'entity': 'I-ORG', 'score': 0.9968, 'index': 1, 'word': 'Hu', 'start': 0, 'end': 2}
{'entity': 'I-ORG', 'score': 0.9293, 'index': 2, 'word': '##gging', 'start': 2, 'end': 7}
{'entity': 'I-ORG', 'score': 0.9763, 'index': 3, 'word': 'Face', 'start': 8, 'end': 12}
{'entity': 'I-MISC', 'score': 0.9983, 'index': 6, 'word': 'French', 'start': 18, 'end': 24}
{'entity': 'I-LOC', 'score': 0.999, 'index': 10, 'word': 'New', 'start': 42, 'end': 45}
{'entity': 'I-LOC', 'score': 0.9987, 'index': 11, 'word': 'York', 'start': 46, 'end': 50}
{'entity': 'I-LOC', 'score': 0.9992, 'index': 12, 'word': 'City', 'start': 51, 'end': 55}
```
### Question answering
Question answering is another token-level task that returns an answer to a question, sometimes with context (open-domain) and other times without context (closed-domain). This task happens whenever we ask a virtual assistant something, like whether a restaurant is open. It can also provide customer or technical support, and help search engines retrieve the relevant information you're asking for.
There are two common types of question answering:
* extractive: given a question and some context, the answer is a span of text extracted from the context by the model
* abstractive: given a question and some context, the answer is generated from the context; this approach is handled by the [`Text2TextGenerationPipeline`] instead of the [`QuestionAnsweringPipeline`] shown below
```py
>>> from transformers import pipeline
>>> question_answerer = pipeline(task="question-answering")
>>> preds = question_answerer(
... question="What is the name of the repository?",
... context="The name of the repository is huggingface/transformers",
... )
>>> print(
... f"score: {round(preds['score'], 4)}, start: {preds['start']}, end: {preds['end']}, answer: {preds['answer']}"
... )
score: 0.9327, start: 30, end: 54, answer: huggingface/transformers
```
### Summarization
Summarization creates a shorter version of a longer text while trying to preserve most of the meaning of the original document. Summarization is a sequence-to-sequence task: it outputs a shorter text sequence than the input. There are a lot of long-form documents that can be summarized to help readers quickly understand the main points. Legislative bills, legal and financial documents, patents, and scientific papers are a few examples of documents that could be summarized to save readers time and serve as a reading aid.
Like question answering, there are two types of summarization:
* extractive: identify and extract the most important sentences from the original text
* abstractive: generate a summary (which may include new words not found in the original text) from the original text; the [`SummarizationPipeline`] uses the abstractive approach
```py
>>> from transformers import pipeline
>>> summarizer = pipeline(task="summarization")
>>> summarizer(
... "In this work, we presented the Transformer, the first sequence transduction model based entirely on attention, replacing the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention. For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers. On both WMT 2014 English-to-German and WMT 2014 English-to-French translation tasks, we achieve a new state of the art. In the former task our best model outperforms even all previously reported ensembles."
... )
[{'summary_text': ' The Transformer is the first sequence transduction model based entirely on attention . It replaces the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention . For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers .'}]
```
### Translation
Translation converts a sequence of text in one language into another language. It is important for helping people from different backgrounds communicate with each other, for helping content reach wider audiences, and it can even be a learning tool to help people learn a new language. Along with summarization, translation is a sequence-to-sequence task, meaning the model receives an input sequence and returns a target output sequence.
In the early days, translation models were mostly monolingual, but recently there has been increasing interest in multilingual models that can translate between many pairs of languages.
```py
>>> from transformers import pipeline
>>> text = "translate English to French: Hugging Face is a community-based open-source platform for machine learning."
>>> translator = pipeline(task="translation", model="google-t5/t5-small")
>>> translator(text)
[{'translation_text': "Hugging Face est une tribune communautaire de l'apprentissage des machines."}]
```
### Language modeling
Language modeling is the task of predicting a word in a sequence of text. It has become a very popular NLP task because a pretrained language model can be fine-tuned for many other downstream tasks. Lately, there has been a lot of interest in large language models (LLMs), which demonstrate zero-shot or few-shot learning. This means the model can solve tasks it wasn't explicitly trained on! Language models can be used to generate fluent and convincing text, though you need to be careful since the text may not always be accurate.
There are two types of language modeling:
* causal: the model's objective is to predict the next token in a sequence, and future tokens are masked.
```py
>>> from transformers import pipeline
>>> prompt = "Hugging Face is a community-based open-source platform for machine learning."
>>> generator = pipeline(task="text-generation")
>>> generator(prompt) # doctest: +SKIP
```
* masked: the model's objective is to predict a masked token within a sequence with full access to the other tokens in the sequence.
```py
>>> text = "Hugging Face is a community-based open-source <mask> for machine learning."
>>> fill_mask = pipeline(task="fill-mask")
>>> preds = fill_mask(text, top_k=1)
>>> preds = [
... {
... "score": round(pred["score"], 4),
... "token": pred["token"],
... "token_str": pred["token_str"],
... "sequence": pred["sequence"],
... }
... for pred in preds
... ]
>>> preds
[{'score': 0.2236,
'token': 1761,
'token_str': ' platform',
'sequence': 'Hugging Face is a community-based open-source platform for machine learning.'}]
```
## Multimodal
Multimodal tasks require a model to process multiple data modalities (text, image, audio, video) to solve a particular problem. Image captioning is an example of a multimodal task, where the model takes an image as input and outputs a sequence of text describing the image or some of its properties.
Although multimodal models work with different data types or modalities, internally the preprocessing steps help the model convert all the data types into embeddings (vectors, or lists of numbers, that hold meaningful information about the data). For a task like image captioning, the model learns the relationships between image embeddings and text embeddings.
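As a quick sketch of image captioning with the [`pipeline`] (the checkpoint shown is one public image-captioning model, not the only option):
```py
>>> from transformers import pipeline
>>> captioner = pipeline(task="image-to-text", model="Salesforce/blip-image-captioning-base")
>>> captioner("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg")  # doctest: +SKIP
```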
### Document question answering
Document question answering is a task that answers natural language questions about a given document. Unlike token-level question answering, which takes text as input, document question answering takes an image of a document as input along with a question about that document, and returns an answer. Document question answering can be used to parse structured documents and extract key information from them. In the example below, the total amount can be extracted from a receipt.
```py
>>> from transformers import pipeline
>>> from PIL import Image
>>> import requests
>>> url = "https://huggingface.co/datasets/hf-internal-testing/example-documents/resolve/main/jpeg_images/2.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> doc_question_answerer = pipeline("document-question-answering", model="magorshunov/layoutlm-invoices")
>>> preds = doc_question_answerer(
...     question="What is the total amount?",
... image=image,
... )
>>> preds
[{'score': 0.8531, 'answer': '17,000', 'start': 4, 'end': 4}]
```
Hopefully this page has given you some more background information about all the types of tasks in each modality and the practical importance of each one. In the next section, you'll learn how 🤗 Transformers works to solve these tasks. | transformers/docs/source/ar/task_summary.md/0 | {
"file_path": "transformers/docs/source/ar/task_summary.md",
"repo_id": "transformers",
"token_count": 14746
} |
# Troubleshoot
Sometimes errors occur, but we are here to help! This guide covers some of the most common issues we've seen and how you can resolve them. However, this guide isn't meant to be a comprehensive collection of every 🤗 Transformers issue. For more help with troubleshooting your issue, try:
<Youtube id="S2EEG3JIt2A"/>
1. Asking for help on the [forums](https://discuss.huggingface.co/). There are specific categories you can post your question to, like [Beginners](https://discuss.huggingface.co/c/beginners/5) or [🤗 Transformers](https://discuss.huggingface.co/c/transformers/9). Make sure you write a good descriptive forum post with some reproducible code to maximize the likelihood that your problem is solved!
<Youtube id="_PAli-V4wj0"/>
2. Create an [Issue](https://github.com/huggingface/transformers/issues/new/choose) on the 🤗 Transformers repository if it is a bug related to the library. Try to include as much information describing the bug as possible to help us better figure out what's wrong and how we can fix it.
3. Check the [Migration](migration) guide if you use an older version of 🤗 Transformers, since some important changes have been introduced between versions.
For more details about troubleshooting and getting help, take a look at [Chapter 8](https://huggingface.co/course/chapter8/1?fw=pt) of the Hugging Face course.
## Firewalled environments
Some GPU instances on cloud and intranet setups are firewalled against external connections, resulting in a connection error. When your script attempts to download model weights or datasets, the download will hang and then time out with an error like:
```
ValueError: Connection error, and we cannot find the requested files in the cached path.
Please try again or make sure your Internet connection is on.
```
In this case, you should try to run 🤗 Transformers in [offline mode](installation#offline-mode) to avoid the connection error.
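For example, assuming the model weights and datasets you need are already in your local cache, you could force offline mode before running anything else:
```py
>>> import os
>>> # Set these before importing or using transformers so no outbound connection is attempted.
>>> os.environ["HF_HUB_OFFLINE"] = "1"
>>> os.environ["TRANSFORMERS_OFFLINE"] = "1"
```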
## CUDA out of memory
Training large models with millions of parameters can be challenging without the appropriate hardware. A common error you may encounter when the GPU runs out of memory is:
```
CUDA out of memory. Tried to allocate 256.00 MiB (GPU 0; 11.17 GiB total capacity; 9.70 GiB already allocated; 179.81 MiB free; 9.85 GiB reserved in total by PyTorch)
```
Here are some potential solutions you can try to reduce memory use (an illustrative configuration follows this list):
- Reduce the [`per_device_train_batch_size`](main_classes/trainer#transformers.TrainingArguments.per_device_train_batch_size) value in [`TrainingArguments`].
- Try using [`gradient_accumulation_steps`](main_classes/trainer#transformers.TrainingArguments.gradient_accumulation_steps) in [`TrainingArguments`] to effectively increase the overall batch size.
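For instance, a configuration along these lines keeps the effective batch size at 16 while lowering peak GPU memory; the numbers are purely illustrative:
```py
>>> from transformers import TrainingArguments
>>> training_args = TrainingArguments(
...     output_dir="my-model",
...     per_device_train_batch_size=4,  # smaller per-step batch to fit in GPU memory
...     gradient_accumulation_steps=4,  # accumulate gradients to keep the effective batch size at 16
... )
```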
<Tip>
Refer to the Performance [guide](performance) for more details about memory-saving techniques.
</Tip>
## Unable to load a saved TensorFlow model
TensorFlow's [model.save](https://www.tensorflow.org/tutorials/keras/save_and_load#save_the_entire_model) method saves the entire model - architecture, weights, training configuration - in a single file. However, when you load the model file again, you may run into an error because 🤗 Transformers may not load all the TensorFlow-related objects in the model file. To avoid issues with saving and loading TensorFlow models, we recommend you:
- Save the model weights as an `h5` file with [`model.save_weights`](https://www.tensorflow.org/tutorials/keras/save_and_load#save_the_entire_model) and then reload the model with [`~TFPreTrainedModel.from_pretrained`]:
```python
>>> from transformers import TFPreTrainedModel
>>> from tensorflow import keras
>>> model.save_weights("some_folder/tf_model.h5")
>>> model = TFPreTrainedModel.from_pretrained("some_folder")
```
- Save the model with [`~TFPretrainedModel.save_pretrained`] and load it again with [`~TFPreTrainedModel.from_pretrained`]:
```python
>>> from transformers import TFPreTrainedModel
>>> model.save_pretrained("path_to/model")
>>> model = TFPreTrainedModel.from_pretrained("path_to/model")
```
## ImportError
Another common error you may encounter, especially if it is a newly released model, is `ImportError`:
```
ImportError: cannot import name 'ImageGPTImageProcessor' from 'transformers' (unknown location)
```
For these error types, check to make sure you have the latest version of 🤗 Transformers installed to access the most recent models:
```bash
pip install transformers --upgrade
```
## CUDA error: device-side assert triggered
Sometimes you may run into a generic CUDA error about an error in the device code.
```
RuntimeError: CUDA error: device-side assert triggered
```
You should try to run the code on a CPU first to get a more descriptive error message. Add the following environment variable to the beginning of your code to switch to a CPU:
```python
>>> import os
>>> os.environ["CUDA_VISIBLE_DEVICES"] = ""
```
Another option is to get a better traceback from the GPU. Add the following environment variable to the beginning of your code to get the traceback to point to the source of the error:
```python
>>> import os
>>> os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
```
## Incorrect output when padding tokens aren't masked
In some cases, the output `hidden_state` may be incorrect if the `input_ids` include padding tokens. To demonstrate, load a model and tokenizer. You can access a model's `pad_token_id` to see its value. The `pad_token_id` may be `None` for some models, but you can always manually set it.
```python
>>> from transformers import AutoModelForSequenceClassification
>>> import torch
>>> model = AutoModelForSequenceClassification.from_pretrained("google-bert/bert-base-uncased")
>>> model.config.pad_token_id
0
```
The following example shows the output without masking the padding tokens:
```python
>>> input_ids = torch.tensor([[7592, 2057, 2097, 2393, 9611, 2115], [7592, 0, 0, 0, 0, 0]])
>>> output = model(input_ids)
>>> print(output.logits)
tensor([[ 0.0082, -0.2307],
[ 0.1317, -0.1683]], grad_fn=<AddmmBackward0>)
```
Here is the actual output of the second sequence:
```python
>>> input_ids = torch.tensor([[7592]])
>>> output = model(input_ids)
>>> print(output.logits)
tensor([[-0.1008, -0.4061]], grad_fn=<AddmmBackward0>)
```
Most of the time, you should provide an `attention_mask` to your model to ignore the padding tokens and avoid this silent error. Now the output of the second sequence matches its actual output:
<Tip>
By default, the tokenizer creates an `attention_mask` for you based on your specific tokenizer's defaults.
</Tip>
```python
>>> attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0]])
>>> output = model(input_ids, attention_mask=attention_mask)
>>> print(output.logits)
tensor([[ 0.0082, -0.2307],
[-0.1008, -0.4061]], grad_fn=<AddmmBackward0>)
```
🤗 Transformers doesn't automatically create an `attention_mask` to mask a padding token if one is provided because:
- Some models don't have a padding token.
- For some use cases, users want the model to attend to a padding token.
When you let the tokenizer pad a batch for you, it returns the matching `attention_mask` as well, as sketched below.
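As a sketch of that usual workflow, letting the tokenizer pad a batch returns the matching `attention_mask` alongside the `input_ids`, so you rarely need to build it by hand:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> # The shorter sentence is padded, and its padding positions get 0 in the attention mask.
>>> batch = tokenizer(["hello we will help fix", "hello"], padding=True, return_tensors="pt")
>>> batch["attention_mask"]  # doctest: +SKIP
```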
## ValueError: Unrecognized configuration class XYZ for this kind of AutoModel
Generally, we recommend using the [`AutoModel`] class to load pretrained instances of models. This class can automatically infer and load the correct architecture from a given checkpoint based on its configuration. If you see this `ValueError` when loading a model from a checkpoint, it means the Auto class couldn't find a mapping from the configuration in the given checkpoint to the kind of model you are trying to load. Most commonly, this happens when a checkpoint doesn't support a given task.
For instance, you'll see this error in the following example because there is no GPT2 model for question answering:
```py
>>> from transformers import AutoProcessor, AutoModelForQuestionAnswering
>>> processor = AutoProcessor.from_pretrained("openai-community/gpt2-medium")
>>> model = AutoModelForQuestionAnswering.from_pretrained("openai-community/gpt2-medium")
ValueError: Unrecognized configuration class <class 'transformers.models.gpt2.configuration_gpt2.GPT2Config'> for this kind of AutoModel: AutoModelForQuestionAnswering.
Model type should be one of AlbertConfig, BartConfig, BertConfig, BigBirdConfig, BigBirdPegasusConfig, BloomConfig, ...
```
| transformers/docs/source/ar/troubleshooting.md/0 | {
"file_path": "transformers/docs/source/ar/troubleshooting.md",
"repo_id": "transformers",
"token_count": 5400
} |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Quick tour
[[open-in-colab]]
Get up and running with 🤗 Transformers! Use the [`pipeline`] for rapid inference, and quickly load a pretrained model and tokenizer with an [AutoClass](./model_doc/auto) to solve your text, image, or audio task.
<Tip>
All code examples presented in the documentation have a toggle on the top left for PyTorch and TensorFlow. If
not, the code is expected to work for both backends without any change.
</Tip>
## Pipeline
The [`pipeline`] is the easiest way to use a pretrained model for a given task.
<Youtube id="tiZFewofSLM"/>
The [`pipeline`] supports many common tasks out-of-the-box:
**Text**:
* Sentiment analysis: classify the polarity of a given text.
* Text generation (in English): generate text from a given input.
* Name entity recognition (NER): label each word with the entity it represents (person, date, location, etc.).
* Question answering: extract the answer from the context, given some context and a question.
* Fill-mask: fill in the blank in a text with masked words.
* Summarization: generate a summary of a long sequence of text or a document.
* Translation: translate text into another language.
* Feature extraction: create a tensor representation of the text.
**Image**:
* Image classification: classify an image.
* Image segmentation: classify every pixel in an image.
* Object detection: detect objects within an image.
**Audio**:
* Audio classification: assign a label to a given segment of audio.
* Automatic speech recognition (ASR): transcribe audio data into text.
<Tip>
For more details about the [`pipeline`] and associated tasks, refer to the documentation [here](./main_classes/pipelines).
</Tip>
### Pipeline usage
In the following example, you will use the [`pipeline`] for sentiment analysis.
Install the following dependencies if you haven't already:
<frameworkcontent>
<pt>
```bash
pip install torch
```
</pt>
<tf>
```bash
pip install tensorflow
```
</tf>
</frameworkcontent>
Import [`pipeline`] and specify the task you want to solve:
```py
>>> from transformers import pipeline
>>> classifier = pipeline("sentiment-analysis")
```
The pipeline downloads and caches a default [pretrained model](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english) and tokenizer for sentiment analysis. Now you can use the `classifier` on your target text:
```py
>>> classifier("We are very happy to show you the ð€ Transformers library.")
[{'label': 'POSITIVE', 'score': 0.9998}]
```
For more than one sentence, pass a list of sentences to the [`pipeline`] which returns a list of dictionaries:
```py
>>> results = classifier(["We are very happy to show you the ð€ Transformers library.", "We hope you don't hate it."])
>>> for result in results:
... print(f"label: {result['label']}, with score: {round(result['score'], 4)}")
label: POSITIVE, with score: 0.9998
label: NEGATIVE, with score: 0.5309
```
The [`pipeline`] can also iterate over an entire dataset. Start by installing the [🤗 Datasets](https://huggingface.co/docs/datasets/) library:
```bash
pip install datasets
```
Create a [`pipeline`] with the task you want to solve and the model you want to use.
```py
>>> import torch
>>> from transformers import pipeline
>>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
```
Next, load the dataset you'd like to use (see the 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart) for more details). For example, let's load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset:
```py
>>> from datasets import load_dataset, Audio
>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT
```
We need to make sure that the sampling rate of the dataset matches the sampling rate `facebook/wav2vec2-base-960h` was trained on.
```py
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate))
```
Audio files are automatically loaded and resampled when the `"audio"` column is accessed.
Let's extract the raw waveform arrays of the first 4 samples and pass them as a list to the pipeline:
```py
>>> result = speech_recognizer(dataset[:4]["audio"])
>>> print([d["text"] for d in result])
['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FODING HOW I'D SET UP A JOIN TO HET WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE AP SO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AND I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I THURN A JOIN A COUNT']
```
For a larger dataset where the inputs are big (as in speech or vision), you will want to pass a generator instead of a list so that you don't load all the inputs into memory at once (a sketch follows below). See the [pipeline documentation](./main_classes/pipelines) for more information.
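As a small sketch of that idea, you could wrap the dataset in a generator and let the pipeline consume it lazily; this assumes the same `speech_recognizer` and MInDS-14 `dataset` loaded above.
```py
>>> def stream_audio(dataset):
...     # Yield one example at a time instead of materializing the whole list in memory.
...     for example in dataset:
...         yield example["audio"]
>>> for prediction in speech_recognizer(stream_audio(dataset)):  # doctest: +SKIP
...     print(prediction["text"])
```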
### Use another model and tokenizer in the pipeline
The [`pipeline`] can accommodate any model from the [Model Hub](https://huggingface.co/models), making it easy to adapt the [`pipeline`] for other use-cases. For example, if you'd like a model capable of handling French text, use the tags on the Model Hub to filter for an appropriate model. The top filtered result returns a multilingual [BERT model](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) fine-tuned for sentiment analysis. Great, let's use this model!
```py
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
```
<frameworkcontent>
<pt>
Use the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and its associated tokenizer (more on an `AutoClass` below):
```py
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification
>>> model = AutoModelForSequenceClassification.from_pretrained(model_name)
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
</pt>
<tf>
Use the [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and its associated tokenizer (more on a `TFAutoClass` below):
```py
>>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
>>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
</tf>
</frameworkcontent>
Then you can specify the model and tokenizer in the [`pipeline`], and apply the `classifier` on your target text:
```py
>>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
>>> classifier("Nous sommes trÚs heureux de vous présenter la bibliothÚque ð€ Transformers.")
[{'label': '5 stars', 'score': 0.7273}]
```
If you can't find a model for your use-case, you will need to fine-tune a pretrained model on your data. Take a look at our [fine-tuning tutorial](./training) to learn how. Finally, after you've fine-tuned your pretrained model, please consider sharing it with the community on the Model Hub (see tutorial [here](./model_sharing)) to democratize NLP for everyone! 🤗
## AutoClass
<Youtube id="AhChOFRegn4"/>
Under the hood, the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] classes work together to power the [`pipeline`]. An [AutoClass](./model_doc/auto) is a shortcut that automatically retrieves the architecture of a pretrained model from its name or path. You only need to select the appropriate `AutoClass` for your task and its associated tokenizer with [`AutoTokenizer`].
Let's return to our example and see how you can use the `AutoClass` to replicate the results of the [`pipeline`].
### AutoTokenizer
A tokenizer is responsible for preprocessing text into a format that is understandable to the model. First, the tokenizer splits the text into words called *tokens*. There are multiple rules that govern the tokenization process, including how and at what level a word is split (learn more about tokenization [here](./tokenizer_summary)). The most important thing to remember, though, is that you need to instantiate the tokenizer with the same model name to ensure you're using the same tokenization rules the model was pretrained with.
Load a tokenizer with [`AutoTokenizer`]:
```py
>>> from transformers import AutoTokenizer
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
Next, the tokenizer converts the tokens into numbers to construct a tensor as input to the model. This is known as the model's *vocabulary*.
Pass your text to the tokenizer:
```py
>>> encoding = tokenizer("We are very happy to show you the ð€ Transformers library.")
>>> print(encoding)
{'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102],
'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```
The tokenizer returns a dictionary containing:
* [input_ids](./glossary#input-ids): numerical representations of your tokens.
* [attention_mask](./glossary#attention-mask): indicates which tokens should be attended to.
Just like the [`pipeline`], the tokenizer accepts a list of inputs. In addition, the tokenizer can also pad and truncate the text to return a batch with uniform length:
<frameworkcontent>
<pt>
```py
>>> pt_batch = tokenizer(
... ["We are very happy to show you the ð€ Transformers library.", "We hope you don't hate it."],
... padding=True,
... truncation=True,
... max_length=512,
... return_tensors="pt",
... )
```
</pt>
<tf>
```py
>>> tf_batch = tokenizer(
... ["We are very happy to show you the ð€ Transformers library.", "We hope you don't hate it."],
... padding=True,
... truncation=True,
... max_length=512,
... return_tensors="tf",
... )
```
</tf>
</frameworkcontent>
Read the [preprocessing](./preprocessing) tutorial for more details about tokenization.
### AutoModel
<frameworkcontent>
<pt>
🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load an [`AutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`AutoModel`] for the task. Since you are doing text - or sequence - classification, load [`AutoModelForSequenceClassification`]:
```py
>>> from transformers import AutoModelForSequenceClassification
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)
```
<Tip>
See the [task summary](./task_summary) for which [`AutoModel`] class to use for which task.
</Tip>
Now pass your preprocessed batch of inputs directly to the model. You just have to unpack the dictionary by adding `**`:
```py
>>> pt_outputs = pt_model(**pt_batch)
```
The model outputs the final activations in the `logits` attribute. Apply the softmax function to the `logits` to retrieve the probabilities:
```py
>>> from torch import nn
>>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1)
>>> print(pt_predictions)
tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725],
[0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>)
```
</pt>
<tf>
🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load a [`TFAutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`TFAutoModel`] for the task. Since you are doing text - or sequence - classification, load [`TFAutoModelForSequenceClassification`]:
```py
>>> from transformers import TFAutoModelForSequenceClassification
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
```
<Tip>
See the [task summary](./task_summary) for which [`AutoModel`] class to use for which task.
</Tip>
Now pass your preprocessed batch of inputs directly to the model by passing the dictionary keys directly to the tensors:
```py
>>> tf_outputs = tf_model(tf_batch)
```
The model outputs the final activations in the `logits` attribute. Apply the softmax function to the `logits` to retrieve the probabilities:
```py
>>> import tensorflow as tf
>>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1)
>>> tf_predictions # doctest: +IGNORE_RESULT
```
</tf>
</frameworkcontent>
<Tip>
All 🤗 Transformers models (PyTorch or TensorFlow) output the tensors *before* the final activation
function (like softmax) because the final activation function is often fused with the loss.
</Tip>
Models are a standard [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or a [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model), so you can use them in your usual training loop. However, to make things easier, 🤗 Transformers provides a [`Trainer`] class for PyTorch that adds functionality for distributed training, mixed precision, and more. For TensorFlow, you can use the `fit` method from [Keras](https://keras.io/). See the [training tutorial](./training) for more details.
<Tip>
🤗 Transformers model outputs are special dataclasses, so their attributes are autocompleted in an IDE.
The model outputs also behave like a tuple or a dictionary (e.g., you can index with an integer, a slice, or a string), in which case the attributes that are `None` are ignored.
</Tip>
### Save a model
<frameworkcontent>
<pt>
Once your model is fine-tuned, you can save it with its tokenizer using [`PreTrainedModel.save_pretrained`]:
```py
>>> pt_save_directory = "./pt_save_pretrained"
>>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT
>>> pt_model.save_pretrained(pt_save_directory)
```
When you are ready to use the model again, reload it with [`PreTrainedModel.from_pretrained`]:
```py
>>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained")
```
</pt>
<tf>
Once your model is fine-tuned, you can save it with its tokenizer using [`TFPreTrainedModel.save_pretrained`]:
```py
>>> tf_save_directory = "./tf_save_pretrained"
>>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT
>>> tf_model.save_pretrained(tf_save_directory)
```
When you are ready to use the model again, reload it with [`TFPreTrainedModel.from_pretrained`]:
```py
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained")
```
</tf>
</frameworkcontent>
One particularly cool 🤗 Transformers feature is the ability to save a model and reload it as either a PyTorch or TensorFlow model. The `from_pt` or `from_tf` parameter can convert the model from one framework to the other:
<frameworkcontent>
<pt>
```py
>>> from transformers import AutoModel
>>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory)
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)
```
</pt>
<tf>
```py
>>> from transformers import TFAutoModel
>>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory)
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True)
```
</tf>
</frameworkcontent>
## Custom model builds
You can modify the model's configuration class to change how a model is built. The configuration specifies a model's attributes, such as the number of hidden layers or attention heads. You start from scratch when you initialize a model from a custom configuration class: the model attributes are randomly initialized, and you'll need to train the model before you can use it to get meaningful results.
Start by importing [`AutoConfig`], and then load the pretrained model you want to modify. Within [`AutoConfig.from_pretrained`], you can specify the attribute you want to change, such as the number of attention heads:
```py
>>> from transformers import AutoConfig
>>> my_config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased", n_heads=12)
```
<frameworkcontent>
<pt>
Create a model from your custom configuration with [`AutoModel.from_config`]:
```py
>>> from transformers import AutoModel
>>> my_model = AutoModel.from_config(my_config)
```
</pt>
<tf>
Create a model from your custom configuration with [`TFAutoModel.from_config`]:
```py
>>> from transformers import TFAutoModel
>>> my_model = TFAutoModel.from_config(my_config)
```
</tf>
</frameworkcontent>
For more information about building custom configurations, take a look at the [Create a custom architecture](./create_a_model) guide.
## What's next?
Now that you've completed the 🤗 Transformers quick tour, check out our guides and learn how to do more specific things like writing a custom model, fine-tuning a model for a task, and how to train a model with a script. If you're interested in learning more about 🤗 Transformers core concepts, grab a cup of coffee and take a look at our Conceptual Guides!
| transformers/docs/source/de/quicktour.md/0 | {
"file_path": "transformers/docs/source/de/quicktour.md",
"repo_id": "transformers",
"token_count": 7324
} |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Instantiate a big model
A barrier to accessing very large pretrained models is the amount of memory required. When loading a pretrained PyTorch model, you usually:
1. Create a model with random weights.
2. Load your pretrained weights.
3. Put those pretrained weights in the model.
The first two steps both require a full version of the model in memory and if the model weighs several GBs, you may not have enough memory for two copies of it. This problem is amplified in distributed training environments because each process loads a pretrained model and stores two copies in memory.
> [!TIP]
> The randomly created model is initialized with "empty" tensors, which take space in memory without filling it. The random values are whatever was in this chunk of memory at the time. To improve loading speed, the [`_fast_init`](https://github.com/huggingface/transformers/blob/c9f6e5e35156e068b227dd9b15521767f6afd4d2/src/transformers/modeling_utils.py#L2710) parameter is set to `True` by default to skip the random initialization for all weights that are correctly loaded.
This guide will show you how Transformers can help you load large pretrained models despite their memory requirements.
## Sharded checkpoints
From Transformers v4.18.0, a checkpoint larger than 10GB is automatically sharded by the [`~PreTrainedModel.save_pretrained`] method. It is split into several smaller partial checkpoints and creates an index file that maps parameter names to the files they're stored in.
The maximum shard size is controlled with the `max_shard_size` parameter, but by default it is 5GB, because it is easier to run on free-tier GPU instances without running out of memory.
For example, let's shard [BioMistral/BioMistral-7B](https://hf.co/BioMistral/BioMistral-7B).
```py
>>> import os
>>> import tempfile
>>> from transformers import AutoModelForCausalLM

>>> # Assumed setup so the snippet runs standalone: load the checkpoint named above before re-saving it in shards.
>>> model = AutoModelForCausalLM.from_pretrained("BioMistral/BioMistral-7B")
>>> with tempfile.TemporaryDirectory() as tmp_dir:
...     model.save_pretrained(tmp_dir, max_shard_size="5GB")
...     print(sorted(os.listdir(tmp_dir)))
['config.json', 'generation_config.json', 'model-00001-of-00006.safetensors', 'model-00002-of-00006.safetensors', 'model-00003-of-00006.safetensors', 'model-00004-of-00006.safetensors', 'model-00005-of-00006.safetensors', 'model-00006-of-00006.safetensors', 'model.safetensors.index.json']
```
The sharded checkpoint is reloaded with the [`~PreTrainedModel.from_pretrained`] method.
```py
>>> with tempfile.TemporaryDirectory() as tmp_dir:
... model.save_pretrained(tmp_dir, max_shard_size="5GB")
... new_model = AutoModel.from_pretrained(tmp_dir)
```
The main advantage of sharded checkpoints for big models is that each shard is loaded after the previous one, which caps the memory usage to only the model size and the largest shard size.
You could also directly load a sharded checkpoint inside a model without the [`~PreTrainedModel.from_pretrained`] method (similar to PyTorch's `load_state_dict()` method for a full checkpoint). In this case, use the [`~modeling_utils.load_sharded_checkpoint`] method.
```py
>>> from transformers.modeling_utils import load_sharded_checkpoint
>>> with tempfile.TemporaryDirectory() as tmp_dir:
... model.save_pretrained(tmp_dir, max_shard_size="5GB")
... load_sharded_checkpoint(model, tmp_dir)
```
### Shard metadata
The index file determines which keys are in the checkpoint and where the corresponding weights are stored. This file is loaded like any other JSON file and you can get a dictionary from it.
```py
>>> import json
>>> with tempfile.TemporaryDirectory() as tmp_dir:
... model.save_pretrained(tmp_dir, max_shard_size="5GB")
... with open(os.path.join(tmp_dir, "model.safetensors.index.json"), "r") as f:
... index = json.load(f)
>>> print(index.keys())
dict_keys(['metadata', 'weight_map'])
```
The `metadata` key provides the total model size.
```py
>>> index["metadata"]
{'total_size': 28966928384}
```
The `weight_map` key maps each parameter name (typically `state_dict` in a PyTorch model) to the shard it's stored in.
```py
>>> index["weight_map"]
{'lm_head.weight': 'model-00006-of-00006.safetensors',
'model.embed_tokens.weight': 'model-00001-of-00006.safetensors',
'model.layers.0.input_layernorm.weight': 'model-00001-of-00006.safetensors',
'model.layers.0.mlp.down_proj.weight': 'model-00001-of-00006.safetensors',
...
}
```
## Accelerate's Big Model Inference
> [!TIP]
> Make sure you have Accelerate v0.9.0 or later and PyTorch v1.9.0 or later installed.
From Transformers v4.20.0, the [`~PreTrainedModel.from_pretrained`] method is supercharged with Accelerate's [Big Model Inference](https://hf.co/docs/accelerate/usage_guides/big_modeling) feature to efficiently handle really big models! Big Model Inference creates a *model skeleton* on PyTorch's [**meta**](https://pytorch.org/docs/main/meta.html) device. The randomly initialized parameters are only created when the pretrained weights are loaded. This way, you aren't keeping two copies of the model in memory at the same time (one for the randomly initialized model and one for the pretrained weights), and the maximum memory consumed is only the full model size.
To enable Big Model Inference in Transformers, set `low_cpu_mem_usage=True` in the [`~PreTrainedModel.from_pretrained`] method.
```py
from transformers import AutoModelForCausalLM
gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", low_cpu_mem_usage=True)
```
Accelerate automatically dispatches the model weights across all available devices, starting with the fastest device (GPU) first and then offloading to the slower devices (CPU and even hard drive). This is enabled by setting `device_map="auto"` in the [`~PreTrainedModel.from_pretrained`] method. When you pass the `device_map` parameter, `low_cpu_mem_usage` is automatically set to `True` so you don't need to specify it.
```py
from transformers import AutoModelForCausalLM
# these loading methods are equivalent
gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map="auto")
gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map="auto", low_cpu_mem_usage=True)
```
You can also write your own `device_map` by mapping each layer to a device. It should map all model parameters to a device, but you don't have to detail where all the submodules of a layer go if the entire layer is on the same device.
```python
device_map = {"model.layers.1": 0, "model.layers.14": 1, "model.layers.31": "cpu", "lm_head": "disk"}
```
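A hand-written map like the sketch above can then be passed directly to [`~PreTrainedModel.from_pretrained`]; keep in mind that the real map must cover every parameter of the checkpoint, and the layer names depend on the architecture:
```python
from transformers import AutoModelForCausalLM

# pass the custom map instead of "auto"; integer entries are GPU indices,
# "cpu" and "disk" trigger offloading to CPU RAM and disk respectively
gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map=device_map)
```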
Access the `hf_device_map` attribute to see how Accelerate split the model across devices.
```py
gemma.hf_device_map
```
```python out
{'model.embed_tokens': 0,
'model.layers.0': 0,
'model.layers.1': 0,
'model.layers.2': 0,
'model.layers.3': 0,
'model.layers.4': 0,
'model.layers.5': 0,
'model.layers.6': 0,
'model.layers.7': 0,
'model.layers.8': 0,
'model.layers.9': 0,
'model.layers.10': 0,
'model.layers.11': 0,
'model.layers.12': 0,
'model.layers.13': 0,
'model.layers.14': 'cpu',
'model.layers.15': 'cpu',
'model.layers.16': 'cpu',
'model.layers.17': 'cpu',
'model.layers.18': 'cpu',
'model.layers.19': 'cpu',
'model.layers.20': 'cpu',
'model.layers.21': 'cpu',
'model.layers.22': 'cpu',
'model.layers.23': 'cpu',
'model.layers.24': 'cpu',
'model.layers.25': 'cpu',
'model.layers.26': 'cpu',
'model.layers.27': 'cpu',
'model.layers.28': 'cpu',
'model.layers.29': 'cpu',
'model.layers.30': 'cpu',
'model.layers.31': 'cpu',
'model.norm': 'cpu',
'lm_head': 'cpu'}
```
## Model data type
PyTorch model weights are normally instantiated as torch.float32, which can be an issue if you try to load a model in a different data type. For example, you'd need twice as much memory to load the weights in torch.float32 and then again to load them in your desired data type, like torch.float16.
> [!WARNING]
> Due to how PyTorch is designed, the `torch_dtype` parameter only supports floating data types.
To avoid wasting memory like this, explicitly set the `torch_dtype` parameter to the desired data type or set `torch_dtype="auto"` to load the weights with the most optimal memory pattern (the data type is automatically derived from the model weights).
<hfoptions id="dtype">
<hfoption id="specific dtype">
```py
import torch
from transformers import AutoModelForCausalLM
gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", torch_dtype=torch.float16)
```
</hfoption>
<hfoption id="auto dtype">
```py
from transformers import AutoModelForCausalLM
gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", torch_dtype="auto")
```
</hfoption>
</hfoptions>
You can also set the data type to use for models instantiated from scratch.
```python
import torch
from transformers import AutoConfig, AutoModel
my_config = AutoConfig.from_pretrained("google/gemma-2b", torch_dtype=torch.float16)
model = AutoModel.from_config(my_config)
```
| transformers/docs/source/en/big_models.md/0 | {
"file_path": "transformers/docs/source/en/big_models.md",
"repo_id": "transformers",
"token_count": 3022
} |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Backbone
A backbone is a model used for feature extraction for higher level computer vision tasks such as object detection and image classification. Transformers provides an [`AutoBackbone`] class for initializing a Transformers backbone from pretrained model weights, and two utility classes:
* [`~utils.BackboneMixin`] enables initializing a backbone from Transformers or [timm](https://hf.co/docs/timm/index) and includes functions for returning the output features and indices.
* [`~utils.BackboneConfigMixin`] sets the output features and indices of the backbone configuration.
[timm](https://hf.co/docs/timm/index) models are loaded with the [`TimmBackbone`] and [`TimmBackboneConfig`] classes.
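As a quick illustration, the sketch below loads a ResNet checkpoint as a backbone and returns feature maps for a few stages; the checkpoint name and `out_indices` values are only examples:
```python
import torch
from transformers import AutoBackbone

# request the feature maps of stages 1-4 of a pretrained ResNet
backbone = AutoBackbone.from_pretrained("microsoft/resnet-50", out_indices=[1, 2, 3, 4])

pixel_values = torch.randn(1, 3, 224, 224)  # dummy image batch
outputs = backbone(pixel_values)
print([feature_map.shape for feature_map in outputs.feature_maps])
```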
Backbones are supported for the following models:
* [BEiT](../model_doc/beit)
* [BiT](../model_doc/bit)
* [ConvNext](../model_doc/convnext)
* [ConvNextV2](../model_doc/convnextv2)
* [DiNAT](../model_doc/dinat)
* [DINOV2](../model_doc/dinov2)
* [FocalNet](../model_doc/focalnet)
* [MaskFormer](../model_doc/maskformer)
* [NAT](../model_doc/nat)
* [ResNet](../model_doc/resnet)
* [Swin Transformer](../model_doc/swin)
* [Swin Transformer v2](../model_doc/swinv2)
* [ViTDet](../model_doc/vitdet)
## AutoBackbone
[[autodoc]] AutoBackbone
## BackboneMixin
[[autodoc]] utils.BackboneMixin
## BackboneConfigMixin
[[autodoc]] utils.BackboneConfigMixin
## TimmBackbone
[[autodoc]] models.timm_backbone.TimmBackbone
## TimmBackboneConfig
[[autodoc]] models.timm_backbone.TimmBackboneConfig
| transformers/docs/source/en/main_classes/backbones.md/0 | {
"file_path": "transformers/docs/source/en/main_classes/backbones.md",
"repo_id": "transformers",
"token_count": 689
} |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# BEiT
## Overview
The BEiT model was proposed in [BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) by
Hangbo Bao, Li Dong and Furu Wei. Inspired by BERT, BEiT is the first paper that makes self-supervised pre-training of
Vision Transformers (ViTs) outperform supervised pre-training. Rather than pre-training the model to predict the class
of an image (as done in the [original ViT paper](https://arxiv.org/abs/2010.11929)), BEiT models are pre-trained to
predict visual tokens from the codebook of OpenAI's [DALL-E model](https://arxiv.org/abs/2102.12092) given masked
patches.
The abstract from the paper is the following:
*We introduce a self-supervised vision representation model BEiT, which stands for Bidirectional Encoder representation
from Image Transformers. Following BERT developed in the natural language processing area, we propose a masked image
modeling task to pretrain vision Transformers. Specifically, each image has two views in our pre-training, i.e, image
patches (such as 16x16 pixels), and visual tokens (i.e., discrete tokens). We first "tokenize" the original image into
visual tokens. Then we randomly mask some image patches and fed them into the backbone Transformer. The pre-training
objective is to recover the original visual tokens based on the corrupted image patches. After pre-training BEiT, we
directly fine-tune the model parameters on downstream tasks by appending task layers upon the pretrained encoder.
Experimental results on image classification and semantic segmentation show that our model achieves competitive results
with previous pre-training methods. For example, base-size BEiT achieves 83.2% top-1 accuracy on ImageNet-1K,
significantly outperforming from-scratch DeiT training (81.8%) with the same setup. Moreover, large-size BEiT obtains
86.3% only using ImageNet-1K, even outperforming ViT-L with supervised pre-training on ImageNet-22K (85.2%).*
This model was contributed by [nielsr](https://huggingface.co/nielsr). The JAX/FLAX version of this model was
contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/beit).
## Usage tips
- BEiT models are regular Vision Transformers, but pre-trained in a self-supervised way rather than supervised. They
outperform both the [original model (ViT)](vit) as well as [Data-efficient Image Transformers (DeiT)](deit) when fine-tuned on ImageNet-1K and CIFAR-100. You can check out demo notebooks regarding inference as well as
fine-tuning on custom data [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer) (you can just replace
[`ViTFeatureExtractor`] by [`BeitImageProcessor`] and
[`ViTForImageClassification`] by [`BeitForImageClassification`]).
- There's also a demo notebook available which showcases how to combine DALL-E's image tokenizer with BEiT for
performing masked image modeling. You can find it [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/BEiT).
- As the BEiT models expect each image to be of the same size (resolution), one can use
[`BeitImageProcessor`] to resize (or rescale) and normalize images for the model.
- Both the patch resolution and image resolution used during pre-training or fine-tuning are reflected in the name of
each checkpoint. For example, `microsoft/beit-base-patch16-224` refers to a base-sized architecture with patch
resolution of 16x16 and fine-tuning resolution of 224x224. All checkpoints can be found on the [hub](https://huggingface.co/models?search=microsoft/beit).
- The available checkpoints are either (1) pre-trained on [ImageNet-22k](http://www.image-net.org/) (a collection of
14 million images and 22k classes) only, (2) also fine-tuned on ImageNet-22k or (3) also fine-tuned on [ImageNet-1k](http://www.image-net.org/challenges/LSVRC/2012/) (also referred to as ILSVRC 2012, a collection of 1.3 million
images and 1,000 classes).
- BEiT uses relative position embeddings, inspired by the T5 model. During pre-training, the authors shared the
relative position bias among the several self-attention layers. During fine-tuning, each layer's relative position
bias is initialized with the shared relative position bias obtained after pre-training. Note that, if one wants to
pre-train a model from scratch, one needs to set either the `use_relative_position_bias` or the
`use_absolute_position_embeddings` attribute of [`BeitConfig`] to `True` in order to add
position embeddings.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/beit_architecture.jpg"
alt="drawing" width="600"/>
<small> BEiT pre-training. Taken from the <a href="https://arxiv.org/abs/2106.08254">original paper.</a> </small>
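Putting the usage tips above into practice, here is a minimal image-classification sketch; the image URL is an arbitrary example and any fine-tuned BEiT checkpoint can be substituted:
```python
import requests
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

# an arbitrary example image (two cats from the COCO dataset)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_class_idx = logits.argmax(-1).item()
print(model.config.id2label[predicted_class_idx])
```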
### Using Scaled Dot Product Attention (SDPA)
PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function
encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the
[official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)
or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention)
page for more information.
SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set
`attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.
```py
import torch
from transformers import BeitForImageClassification

model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224", attn_implementation="sdpa", torch_dtype=torch.float16)
...
```
For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).
On a local benchmark (NVIDIA GeForce RTX 2060-8GB, PyTorch 2.5.1, OS Ubuntu 20.04) with `float16` and
`microsoft/beit-base-patch16-224` model, we saw the following improvements during training and inference:
#### Training
| num_training_steps | batch_size | image_size | is_cuda | Time per batch (eager - s) | Time per batch (sdpa - s) | Speedup (%) | Eager peak mem (MB) | SDPA peak mem (MB) | Mem saving (%) |
|--------------------|------------|--------------|---------|----------------------------|---------------------------|-------------|----------------------|--------------------|----------------|
| 50 | 2 | (1048, 640) | True | 0.984 | 0.746 | 31.975 | 6738.915 | 4319.886 | 55.998 |
#### Inference
| Image batch size | Eager (s/iter) | Eager CI, % | Eager memory (MB) | SDPA (s/iter) | SDPA CI, % | SDPA memory (MB) | SDPA speedup | SDPA memory saved (%) |
|-------------------:|-----------------:|:--------------|--------------------:|----------------:|:-------------|-------------------:|---------------:|----------------------:|
| 1 | 0.012 | ±0.3% | 3.76657e+08 | 0.011 | ±0.5% | 3.75739e+08 | 1.05 | 0.244 |
| 4 | 0.013 | ±0.1% | 4.03147e+08 | 0.011 | ±0.2% | 3.90554e+08 | 1.178 | 3.225 |
| 16 | 0.045 | ±0.1% | 4.96697e+08 | 0.035 | ±0.1% | 4.51232e+08 | 1.304 | 10.076 |
| 32 | 0.088 | ±0.1% | 6.24417e+08 | 0.066 | ±0.1% | 5.33488e+08 | 1.325 | 17.044 |
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with BEiT.
<PipelineTag pipeline="image-classification"/>
- [`BeitForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb).
- See also: [Image classification task guide](../tasks/image_classification)
**Semantic segmentation**
- [Semantic segmentation task guide](../tasks/semantic_segmentation)
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
## BEiT specific outputs
[[autodoc]] models.beit.modeling_beit.BeitModelOutputWithPooling
[[autodoc]] models.beit.modeling_flax_beit.FlaxBeitModelOutputWithPooling
## BeitConfig
[[autodoc]] BeitConfig
## BeitFeatureExtractor
[[autodoc]] BeitFeatureExtractor
- __call__
- post_process_semantic_segmentation
## BeitImageProcessor
[[autodoc]] BeitImageProcessor
- preprocess
- post_process_semantic_segmentation
<frameworkcontent>
<pt>
## BeitModel
[[autodoc]] BeitModel
- forward
## BeitForMaskedImageModeling
[[autodoc]] BeitForMaskedImageModeling
- forward
## BeitForImageClassification
[[autodoc]] BeitForImageClassification
- forward
## BeitForSemanticSegmentation
[[autodoc]] BeitForSemanticSegmentation
- forward
</pt>
<jax>
## FlaxBeitModel
[[autodoc]] FlaxBeitModel
- __call__
## FlaxBeitForMaskedImageModeling
[[autodoc]] FlaxBeitForMaskedImageModeling
- __call__
## FlaxBeitForImageClassification
[[autodoc]] FlaxBeitForImageClassification
- __call__
</jax>
</frameworkcontent> | transformers/docs/source/en/model_doc/beit.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/beit.md",
"repo_id": "transformers",
"token_count": 3501
} |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# ConvBERT
<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=convbert">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-convbert-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/conv-bert-base">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>
## Overview
The ConvBERT model was proposed in [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng
Yan.
The abstract from the paper is the following:
*Pre-trained language models like BERT and its variants have recently achieved impressive performance in various
natural language understanding tasks. However, BERT heavily relies on the global self-attention block and thus suffers
large memory footprint and computation cost. Although all its attention heads query on the whole input sequence for
generating the attention map from a global perspective, we observe some heads only need to learn local dependencies,
which means the existence of computation redundancy. We therefore propose a novel span-based dynamic convolution to
replace these self-attention heads to directly model local dependencies. The novel convolution heads, together with the
rest self-attention heads, form a new mixed attention block that is more efficient at both global and local context
learning. We equip BERT with this mixed attention design and build a ConvBERT model. Experiments have shown that
ConvBERT significantly outperforms BERT and its variants in various downstream tasks, with lower training cost and
fewer model parameters. Remarkably, ConvBERTbase model achieves 86.4 GLUE score, 0.7 higher than ELECTRAbase, while
using less than 1/4 training cost. Code and pre-trained models will be released.*
This model was contributed by [abhishek](https://huggingface.co/abhishek). The original implementation can be found
[here](https://github.com/yitu-opensource/ConvBert).
## Usage tips
ConvBERT training tips are similar to those of BERT. For usage tips refer to [BERT documentation](bert).
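For a quick smoke test, the base model can be used as a text encoder much like BERT; we assume `YituTech/conv-bert-base` as the pretrained checkpoint:
```python
import torch
from transformers import AutoTokenizer, ConvBertModel

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = ConvBertModel.from_pretrained("YituTech/conv-bert-base")

inputs = tokenizer("ConvBERT mixes self-attention with span-based dynamic convolution.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
```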
## Resources
- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)
## ConvBertConfig
[[autodoc]] ConvBertConfig
## ConvBertTokenizer
[[autodoc]] ConvBertTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## ConvBertTokenizerFast
[[autodoc]] ConvBertTokenizerFast
<frameworkcontent>
<pt>
## ConvBertModel
[[autodoc]] ConvBertModel
- forward
## ConvBertForMaskedLM
[[autodoc]] ConvBertForMaskedLM
- forward
## ConvBertForSequenceClassification
[[autodoc]] ConvBertForSequenceClassification
- forward
## ConvBertForMultipleChoice
[[autodoc]] ConvBertForMultipleChoice
- forward
## ConvBertForTokenClassification
[[autodoc]] ConvBertForTokenClassification
- forward
## ConvBertForQuestionAnswering
[[autodoc]] ConvBertForQuestionAnswering
- forward
</pt>
<tf>
## TFConvBertModel
[[autodoc]] TFConvBertModel
- call
## TFConvBertForMaskedLM
[[autodoc]] TFConvBertForMaskedLM
- call
## TFConvBertForSequenceClassification
[[autodoc]] TFConvBertForSequenceClassification
- call
## TFConvBertForMultipleChoice
[[autodoc]] TFConvBertForMultipleChoice
- call
## TFConvBertForTokenClassification
[[autodoc]] TFConvBertForTokenClassification
- call
## TFConvBertForQuestionAnswering
[[autodoc]] TFConvBertForQuestionAnswering
- call
</tf>
</frameworkcontent>
| transformers/docs/source/en/model_doc/convbert.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/convbert.md",
"repo_id": "transformers",
"token_count": 1393
} |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# EfficientNet
## Overview
The EfficientNet model was proposed in [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)
by Mingxing Tan and Quoc V. Le. EfficientNets are a family of image classification models, which achieve state-of-the-art accuracy, yet being an order-of-magnitude smaller and faster than previous models.
The abstract from the paper is the following:
*Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are available. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on scaling up MobileNets and ResNet.
To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3% top-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet. Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters.*
This model was contributed by [adirik](https://huggingface.co/adirik).
The original code can be found [here](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet).
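A minimal image-classification sketch, assuming the `google/efficientnet-b7` checkpoint and an arbitrary example image:
```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, EfficientNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # arbitrary example image
image = Image.open(requests.get(url, stream=True).raw)

preprocessor = AutoImageProcessor.from_pretrained("google/efficientnet-b7")
model = EfficientNetForImageClassification.from_pretrained("google/efficientnet-b7")

inputs = preprocessor(image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_label = logits.argmax(-1).item()
print(model.config.id2label[predicted_label])
```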
## EfficientNetConfig
[[autodoc]] EfficientNetConfig
## EfficientNetImageProcessor
[[autodoc]] EfficientNetImageProcessor
- preprocess
## EfficientNetModel
[[autodoc]] EfficientNetModel
- forward
## EfficientNetForImageClassification
[[autodoc]] EfficientNetForImageClassification
- forward
| transformers/docs/source/en/model_doc/efficientnet.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/efficientnet.md",
"repo_id": "transformers",
"token_count": 725
} |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# FNet
## Overview
The FNet model was proposed in [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by
James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. The model replaces the self-attention layer in a BERT
model with a fourier transform which returns only the real parts of the transform. The model is significantly faster
than the BERT model because it has fewer parameters and is more memory efficient. The model achieves about 92-97%
accuracy of BERT counterparts on GLUE benchmark, and trains much faster than the BERT model. The abstract from the
paper is the following:
*We show that Transformer encoder architectures can be sped up, with limited accuracy costs, by replacing the
self-attention sublayers with simple linear transformations that "mix" input tokens. These linear mixers, along with
standard nonlinearities in feed-forward layers, prove competent at modeling semantic relationships in several text
classification tasks. Most surprisingly, we find that replacing the self-attention sublayer in a Transformer encoder
with a standard, unparameterized Fourier Transform achieves 92-97% of the accuracy of BERT counterparts on the GLUE
benchmark, but trains 80% faster on GPUs and 70% faster on TPUs at standard 512 input lengths. At longer input lengths,
our FNet model is significantly faster: when compared to the "efficient" Transformers on the Long Range Arena
benchmark, FNet matches the accuracy of the most accurate models, while outpacing the fastest models across all
sequence lengths on GPUs (and across relatively shorter lengths on TPUs). Finally, FNet has a light memory footprint
and is particularly efficient at smaller model sizes; for a fixed speed and accuracy budget, small FNet models
outperform Transformer counterparts.*
This model was contributed by [gchhablani](https://huggingface.co/gchhablani). The original code can be found [here](https://github.com/google-research/google-research/tree/master/f_net).
## Usage tips
The model was trained without an attention mask as it is based on Fourier Transform. The model was trained with
maximum sequence length 512 which includes pad tokens. Hence, it is highly recommended to use the same maximum
sequence length for fine-tuning and inference.
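Keeping the tip above in mind, a short sketch of encoding text with the base checkpoint could look like this; `google/fnet-base` is our assumed pretrained checkpoint, and note that the tokenizer produces no attention mask because the model does not use one:
```python
import torch
from transformers import AutoTokenizer, FNetModel

tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
model = FNetModel.from_pretrained("google/fnet-base")

inputs = tokenizer("FNet replaces self-attention with Fourier transforms.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)
```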
## Resources
- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)
## FNetConfig
[[autodoc]] FNetConfig
## FNetTokenizer
[[autodoc]] FNetTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## FNetTokenizerFast
[[autodoc]] FNetTokenizerFast
## FNetModel
[[autodoc]] FNetModel
- forward
## FNetForPreTraining
[[autodoc]] FNetForPreTraining
- forward
## FNetForMaskedLM
[[autodoc]] FNetForMaskedLM
- forward
## FNetForNextSentencePrediction
[[autodoc]] FNetForNextSentencePrediction
- forward
## FNetForSequenceClassification
[[autodoc]] FNetForSequenceClassification
- forward
## FNetForMultipleChoice
[[autodoc]] FNetForMultipleChoice
- forward
## FNetForTokenClassification
[[autodoc]] FNetForTokenClassification
- forward
## FNetForQuestionAnswering
[[autodoc]] FNetForQuestionAnswering
- forward
| transformers/docs/source/en/model_doc/fnet.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/fnet.md",
"repo_id": "transformers",
"token_count": 1150
} |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# GPT-NeoX-Japanese
## Overview
We introduce GPT-NeoX-Japanese, which is an autoregressive language model for Japanese, trained on top of [https://github.com/EleutherAI/gpt-neox](https://github.com/EleutherAI/gpt-neox).
Japanese is a unique language with its large vocabulary and a combination of hiragana, katakana, and kanji writing scripts.
To address this distinct structure of the Japanese language, we use a [special sub-word tokenizer](https://github.com/tanreinama/Japanese-BPEEncoder_V2). We are very grateful to *tanreinama* for open-sourcing this incredibly helpful tokenizer.
Following the recommendations from Google's research on [PaLM](https://ai.googleblog.com/2022/04/pathways-language-model-palm-scaling-to.html), we have removed bias parameters from the transformer blocks, achieving better model performance. Please refer to [this article](https://medium.com/ml-abeja/training-a-better-gpt-2-93b157662ae4) for details.
Development of the model was led by [Shinya Otani](https://github.com/SO0529), [Takayoshi Makabe](https://github.com/spider-man-tm), [Anuj Arora](https://github.com/Anuj040), and [Kyo Hattori](https://github.com/go5paopao) from [ABEJA, Inc.](https://www.abejainc.com/). For more information on this model-building activity, please refer to [this page (ja)](https://tech-blog.abeja.asia/entry/abeja-gpt-project-202207).
### Usage example
The `generate()` method can be used to generate text with the GPT NeoX Japanese model.
```python
>>> from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseTokenizer
>>> model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b")
>>> tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
>>> prompt = "人とAIが協調するためには、"
>>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids
>>> gen_tokens = model.generate(
... input_ids,
... do_sample=True,
... temperature=0.9,
... max_length=100,
... )
>>> gen_text = tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)[0]
>>> print(gen_text)
人とAIが協調するためには、AIと人が共存し、AIを正しく理解する必要があります。
```
## Resources
- [Causal language modeling task guide](../tasks/language_modeling)
## GPTNeoXJapaneseConfig
[[autodoc]] GPTNeoXJapaneseConfig
## GPTNeoXJapaneseTokenizer
[[autodoc]] GPTNeoXJapaneseTokenizer
## GPTNeoXJapaneseModel
[[autodoc]] GPTNeoXJapaneseModel
- forward
## GPTNeoXJapaneseForCausalLM
[[autodoc]] GPTNeoXJapaneseForCausalLM
- forward
| transformers/docs/source/en/model_doc/gpt_neox_japanese.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/gpt_neox_japanese.md",
"repo_id": "transformers",
"token_count": 1075
} |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# LiLT
## Overview
The LiLT model was proposed in [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
LiLT makes it possible to combine any pre-trained RoBERTa text encoder with a lightweight Layout Transformer, enabling [LayoutLM](layoutlm)-like document understanding for many
languages.
The abstract from the paper is the following:
*Structured document understanding has attracted considerable attention and made significant progress recently, owing to its crucial role in intelligent document processing. However, most existing related models can only deal with the document data of specific language(s) (typically English) included in the pre-training collection, which is extremely limited. To address this issue, we propose a simple yet effective Language-independent Layout Transformer (LiLT) for structured document understanding. LiLT can be pre-trained on the structured documents of a single language and then directly fine-tuned on other languages with the corresponding off-the-shelf monolingual/multilingual pre-trained textual models. Experimental results on eight languages have shown that LiLT can achieve competitive or even superior performance on diverse widely-used downstream benchmarks, which enables language-independent benefit from the pre-training of document layout structure.*
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/lilt_architecture.jpg"
alt="drawing" width="600"/>
<small> LiLT architecture. Taken from the <a href="https://arxiv.org/abs/2202.13669">original paper</a>. </small>
This model was contributed by [nielsr](https://huggingface.co/nielsr).
The original code can be found [here](https://github.com/jpwang/lilt).
## Usage tips
- To combine the Language-Independent Layout Transformer with a new RoBERTa checkpoint from the [hub](https://huggingface.co/models?search=roberta), refer to [this guide](https://github.com/jpWang/LiLT#or-generate-your-own-checkpoint-optional).
The script will result in `config.json` and `pytorch_model.bin` files being stored locally. After doing this, one can do the following (assuming you're logged in with your HuggingFace account):
```python
from transformers import LiltModel
model = LiltModel.from_pretrained("path_to_your_files")
model.push_to_hub("name_of_repo_on_the_hub")
```
- When preparing data for the model, make sure to use the token vocabulary that corresponds to the RoBERTa checkpoint you combined with the Layout Transformer.
- As [lilt-roberta-en-base](https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base) uses the same vocabulary as [LayoutLMv3](layoutlmv3), one can use [`LayoutLMv3TokenizerFast`] to prepare data for the model.
The same is true for [lilt-infoxlm-base](https://huggingface.co/SCUT-DLVCLab/lilt-infoxlm-base): one can use [`LayoutXLMTokenizerFast`] for that model (see the sketch below).
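A combined sketch might look like the following; the words, bounding boxes (in the 0-1000 normalized range) and `num_labels` are made up, and the token-classification head stays randomly initialized until you fine-tune it:
```python
import torch
from transformers import AutoTokenizer, LiltForTokenClassification

# lilt-roberta-en-base ships a LayoutLMv3-style tokenizer, so it accepts words plus boxes directly
tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
model = LiltForTokenClassification.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base", num_labels=7)

words = ["Invoice", "number:", "12345"]                               # hypothetical OCR output
boxes = [[48, 84, 156, 98], [160, 84, 254, 98], [260, 84, 330, 98]]   # hypothetical word boxes

encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
with torch.no_grad():
    outputs = model(**encoding)

predictions = outputs.logits.argmax(-1)
```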
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LiLT.
- Demo notebooks for LiLT can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LiLT).
**Documentation resources**
- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
## LiltConfig
[[autodoc]] LiltConfig
## LiltModel
[[autodoc]] LiltModel
- forward
## LiltForSequenceClassification
[[autodoc]] LiltForSequenceClassification
- forward
## LiltForTokenClassification
[[autodoc]] LiltForTokenClassification
- forward
## LiltForQuestionAnswering
[[autodoc]] LiltForQuestionAnswering
- forward
| transformers/docs/source/en/model_doc/lilt.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/lilt.md",
"repo_id": "transformers",
"token_count": 1291
} |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# MarianMT
<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=marian">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-marian-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/opus-mt-zh-en">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>
## Overview
A framework for translation models, using the same models as BART. Translations should be similar, but not identical to output in the test set linked to in each model card.
This model was contributed by [sshleifer](https://huggingface.co/sshleifer).
## Implementation Notes
- Each model is about 298 MB on disk, there are more than 1,000 models.
- The list of supported language pairs can be found [here](https://huggingface.co/Helsinki-NLP).
- Models were originally trained by [Jörg Tiedemann](https://researchportal.helsinki.fi/en/persons/j%C3%B6rg-tiedemann) using the [Marian](https://marian-nmt.github.io/) C++ library, which supports fast training and translation.
- All models are transformer encoder-decoders with 6 layers in each component. Each model's performance is documented
in a model card.
- The 80 opus models that require BPE preprocessing are not supported.
- The modeling code is the same as [`BartForConditionalGeneration`] with a few minor modifications:
- static (sinusoid) positional embeddings (`MarianConfig.static_position_embeddings=True`)
- no layernorm_embedding (`MarianConfig.normalize_embedding=False`)
- the model starts generating with `pad_token_id` (which has 0 as a token_embedding) as the prefix (Bart uses
`</s>`),
- Code to bulk convert models can be found in `convert_marian_to_pytorch.py`.
## Naming
- All model names use the following format: `Helsinki-NLP/opus-mt-{src}-{tgt}`
- The language codes used to name models are inconsistent. Two digit codes can usually be found [here](https://developers.google.com/admin-sdk/directory/v1/languages), three digit codes require googling "language
code {code}".
- Codes formatted like `es_AR` are usually `code_{region}`. That one is Spanish from Argentina.
- The models were converted in two stages. The first 1000 models use ISO-639-2 codes to identify languages, the second
group use a combination of ISO-639-5 codes and ISO-639-2 codes.
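For instance, following the naming scheme above, a single English-to-German pair can be loaded and used like this (`Helsinki-NLP/opus-mt-en-de` is one of the available pairs):
```python
from transformers import MarianMTModel, MarianTokenizer

model_name = "Helsinki-NLP/opus-mt-en-de"  # Helsinki-NLP/opus-mt-{src}-{tgt}
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

batch = tokenizer(["Machine translation is fun."], return_tensors="pt", padding=True)
translated = model.generate(**batch)
print(tokenizer.batch_decode(translated, skip_special_tokens=True))
```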
## Examples
- Since Marian models are smaller than many other translation models available in the library, they can be useful for
fine-tuning experiments and integration tests.
- [Fine-tune on GPU](https://github.com/huggingface/transformers/blob/master/examples/legacy/seq2seq/train_distil_marian_enro.sh)
## Multilingual Models
- All model names use the following format: `Helsinki-NLP/opus-mt-{src}-{tgt}`:
  - If a model can output multiple languages, you should specify a language code by prepending the desired output
language to the `src_text`.
  - You can see a model's supported language codes in its model card, under target constituents, like in [opus-mt-en-roa](https://huggingface.co/Helsinki-NLP/opus-mt-en-roa).
- Note that if a model is only multilingual on the source side, like `Helsinki-NLP/opus-mt-roa-en`, no language
codes are required.
New multi-lingual models from the [Tatoeba-Challenge repo](https://github.com/Helsinki-NLP/Tatoeba-Challenge)
require 3 character language codes:
```python
>>> from transformers import MarianMTModel, MarianTokenizer
>>> src_text = [
... ">>fra<< this is a sentence in english that we want to translate to french",
... ">>por<< This should go to portuguese",
... ">>esp<< And this to Spanish",
... ]
>>> model_name = "Helsinki-NLP/opus-mt-en-roa"
>>> tokenizer = MarianTokenizer.from_pretrained(model_name)
>>> print(tokenizer.supported_language_codes)
['>>zlm_Latn<<', '>>mfe<<', '>>hat<<', '>>pap<<', '>>ast<<', '>>cat<<', '>>ind<<', '>>glg<<', '>>wln<<', '>>spa<<', '>>fra<<', '>>ron<<', '>>por<<', '>>ita<<', '>>oci<<', '>>arg<<', '>>min<<']
>>> model = MarianMTModel.from_pretrained(model_name)
>>> translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True))
>>> [tokenizer.decode(t, skip_special_tokens=True) for t in translated]
["c'est une phrase en anglais que nous voulons traduire en français",
'Isto deve ir para o português.',
'Y esto al español']
```
Here is the code to see all available pretrained models on the hub:
```python
from huggingface_hub import list_models
model_list = list_models()
org = "Helsinki-NLP"
model_ids = [x.id for x in model_list if x.id.startswith(org)]
suffix = [x.split("/")[1] for x in model_ids]
old_style_multi_models = [f"{org}/{s}" for s in suffix if s != s.lower()]
```
## Old Style Multi-Lingual Models
These are the old style multi-lingual models ported from the OPUS-MT-Train repo, along with the members of each language
group:
```python no-style
['Helsinki-NLP/opus-mt-NORTH_EU-NORTH_EU',
'Helsinki-NLP/opus-mt-ROMANCE-en',
'Helsinki-NLP/opus-mt-SCANDINAVIA-SCANDINAVIA',
'Helsinki-NLP/opus-mt-de-ZH',
'Helsinki-NLP/opus-mt-en-CELTIC',
'Helsinki-NLP/opus-mt-en-ROMANCE',
'Helsinki-NLP/opus-mt-es-NORWAY',
'Helsinki-NLP/opus-mt-fi-NORWAY',
'Helsinki-NLP/opus-mt-fi-ZH',
'Helsinki-NLP/opus-mt-fi_nb_no_nn_ru_sv_en-SAMI',
'Helsinki-NLP/opus-mt-sv-NORWAY',
'Helsinki-NLP/opus-mt-sv-ZH']
GROUP_MEMBERS = {
'ZH': ['cmn', 'cn', 'yue', 'ze_zh', 'zh_cn', 'zh_CN', 'zh_HK', 'zh_tw', 'zh_TW', 'zh_yue', 'zhs', 'zht', 'zh'],
'ROMANCE': ['fr', 'fr_BE', 'fr_CA', 'fr_FR', 'wa', 'frp', 'oc', 'ca', 'rm', 'lld', 'fur', 'lij', 'lmo', 'es', 'es_AR', 'es_CL', 'es_CO', 'es_CR', 'es_DO', 'es_EC', 'es_ES', 'es_GT', 'es_HN', 'es_MX', 'es_NI', 'es_PA', 'es_PE', 'es_PR', 'es_SV', 'es_UY', 'es_VE', 'pt', 'pt_br', 'pt_BR', 'pt_PT', 'gl', 'lad', 'an', 'mwl', 'it', 'it_IT', 'co', 'nap', 'scn', 'vec', 'sc', 'ro', 'la'],
'NORTH_EU': ['de', 'nl', 'fy', 'af', 'da', 'fo', 'is', 'no', 'nb', 'nn', 'sv'],
'SCANDINAVIA': ['da', 'fo', 'is', 'no', 'nb', 'nn', 'sv'],
'SAMI': ['se', 'sma', 'smj', 'smn', 'sms'],
'NORWAY': ['nb_NO', 'nb', 'nn_NO', 'nn', 'nog', 'no_nb', 'no'],
'CELTIC': ['ga', 'cy', 'br', 'gd', 'kw', 'gv']
}
```
Example of translating English to many Romance languages, using old-style 2 character language codes:
```python
>>> from transformers import MarianMTModel, MarianTokenizer
>>> src_text = [
... ">>fr<< this is a sentence in english that we want to translate to french",
... ">>pt<< This should go to portuguese",
... ">>es<< And this to Spanish",
... ]
>>> model_name = "Helsinki-NLP/opus-mt-en-ROMANCE"
>>> tokenizer = MarianTokenizer.from_pretrained(model_name)
>>> model = MarianMTModel.from_pretrained(model_name)
>>> translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True))
>>> tgt_text = [tokenizer.decode(t, skip_special_tokens=True) for t in translated]
["c'est une phrase en anglais que nous voulons traduire en français",
'Isto deve ir para o português.',
'Y esto al español']
```
## Resources
- [Translation task guide](../tasks/translation)
- [Summarization task guide](../tasks/summarization)
- [Causal language modeling task guide](../tasks/language_modeling)
## MarianConfig
[[autodoc]] MarianConfig
## MarianTokenizer
[[autodoc]] MarianTokenizer
- build_inputs_with_special_tokens
<frameworkcontent>
<pt>
## MarianModel
[[autodoc]] MarianModel
- forward
## MarianMTModel
[[autodoc]] MarianMTModel
- forward
## MarianForCausalLM
[[autodoc]] MarianForCausalLM
- forward
</pt>
<tf>
## TFMarianModel
[[autodoc]] TFMarianModel
- call
## TFMarianMTModel
[[autodoc]] TFMarianMTModel
- call
</tf>
<jax>
## FlaxMarianModel
[[autodoc]] FlaxMarianModel
- __call__
## FlaxMarianMTModel
[[autodoc]] FlaxMarianMTModel
- __call__
</jax>
</frameworkcontent>
| transformers/docs/source/en/model_doc/marian.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/marian.md",
"repo_id": "transformers",
"token_count": 3062
} |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# MMS
## Overview
The MMS model was proposed in [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516)
by Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli
The abstract from the paper is the following:
*Expanding the language coverage of speech technology has the potential to improve access to information for many more people.
However, current speech technology is restricted to about one hundred languages which is a small fraction of the over 7,000
languages spoken around the world.
The Massively Multilingual Speech (MMS) project increases the number of supported languages by 10-40x, depending on the task.
The main ingredients are a new dataset based on readings of publicly available religious texts and effectively leveraging
self-supervised learning. We built pre-trained wav2vec 2.0 models covering 1,406 languages,
a single multilingual automatic speech recognition model for 1,107 languages, speech synthesis models
for the same number of languages, as well as a language identification model for 4,017 languages.
Experiments show that our multilingual speech recognition model more than halves the word error rate of
Whisper on 54 languages of the FLEURS benchmark while being trained on a small fraction of the labeled data.*
Here are the different models open-sourced in the MMS project. The models and code were originally released [here](https://github.com/facebookresearch/fairseq/tree/main/examples/mms). We have added them to the `transformers` framework, making them easier to use.
### Automatic Speech Recognition (ASR)
The ASR model checkpoints can be found here : [mms-1b-fl102](https://huggingface.co/facebook/mms-1b-fl102), [mms-1b-l1107](https://huggingface.co/facebook/mms-1b-l1107), [mms-1b-all](https://huggingface.co/facebook/mms-1b-all). For best accuracy, use the `mms-1b-all` model.
Tips:
- All ASR models accept a float array corresponding to the raw waveform of the speech signal. The raw waveform should be pre-processed with [`Wav2Vec2FeatureExtractor`].
- The models were trained using connectionist temporal classification (CTC) so the model output has to be decoded using
[`Wav2Vec2CTCTokenizer`].
- You can load different language adapter weights for different languages via [`~Wav2Vec2PreTrainedModel.load_adapter`]. Language adapters only consist of roughly 2 million parameters
and can therefore be efficiently loaded on the fly when needed.
#### Loading
By default MMS loads adapter weights for English. If you want to load adapter weights of another language
make sure to specify `target_lang=<your-chosen-target-lang>` as well as `ignore_mismatched_sizes=True`.
The `ignore_mismatched_sizes=True` keyword has to be passed to allow the language model head to be resized according
to the vocabulary of the specified language.
Similarly, the processor should be loaded with the same target language
```py
from transformers import Wav2Vec2ForCTC, AutoProcessor
model_id = "facebook/mms-1b-all"
target_lang = "fra"
processor = AutoProcessor.from_pretrained(model_id, target_lang=target_lang)
model = Wav2Vec2ForCTC.from_pretrained(model_id, target_lang=target_lang, ignore_mismatched_sizes=True)
```
<Tip>
You can safely ignore a warning such as:
```text
Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at facebook/mms-1b-all and are newly initialized because the shapes did not match:
- lm_head.bias: found shape torch.Size([154]) in the checkpoint and torch.Size([314]) in the model instantiated
- lm_head.weight: found shape torch.Size([154, 1280]) in the checkpoint and torch.Size([314, 1280]) in the model instantiated
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
```
</Tip>
If you want to use the ASR pipeline, you can load your chosen target language as such:
```py
from transformers import pipeline
model_id = "facebook/mms-1b-all"
target_lang = "fra"
pipe = pipeline(model=model_id, model_kwargs={"target_lang": "fra", "ignore_mismatched_sizes": True})
```
#### Inference
Next, let's look at how we can run MMS in inference and change adapter layers after having called [`~PreTrainedModel.from_pretrained`].
First, we load audio data in different languages using the [Datasets](https://github.com/huggingface/datasets) library.
```py
from datasets import load_dataset, Audio
# English
stream_data = load_dataset("mozilla-foundation/common_voice_13_0", "en", split="test", streaming=True)
stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000))
en_sample = next(iter(stream_data))["audio"]["array"]
# French
stream_data = load_dataset("mozilla-foundation/common_voice_13_0", "fr", split="test", streaming=True)
stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000))
fr_sample = next(iter(stream_data))["audio"]["array"]
```
Next, we load the model and processor
```py
from transformers import Wav2Vec2ForCTC, AutoProcessor
import torch
model_id = "facebook/mms-1b-all"
processor = AutoProcessor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)
```
Now we process the audio data, pass the processed audio data to the model and transcribe the model output,
just like we usually do for [`Wav2Vec2ForCTC`].
```py
inputs = processor(en_sample, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
outputs = model(**inputs).logits
ids = torch.argmax(outputs, dim=-1)[0]
transcription = processor.decode(ids)
# 'joe keton disapproved of films and buster also had reservations about the media'
```
We can now keep the same model in memory and simply switch out the language adapters by
calling the convenient [`~Wav2Vec2ForCTC.load_adapter`] function for the model and [`~Wav2Vec2CTCTokenizer.set_target_lang`] for the tokenizer.
We pass the target language as an input - `"fra"` for French.
```py
processor.tokenizer.set_target_lang("fra")
model.load_adapter("fra")
inputs = processor(fr_sample, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
outputs = model(**inputs).logits
ids = torch.argmax(outputs, dim=-1)[0]
transcription = processor.decode(ids)
# "ce dernier est volé tout au long de l'histoire romaine"
```
In the same way the language can be switched out for all other supported languages. Please have a look at:
```py
processor.tokenizer.vocab.keys()
```
to see all supported languages.
To further improve performance from ASR models, language model decoding can be used. See the documentation [here](https://huggingface.co/facebook/mms-1b-all) for further details.
### Speech Synthesis (TTS)
MMS-TTS uses the same model architecture as VITS, which was added to 🤗 Transformers in v4.33. MMS trains a separate
model checkpoint for each of the 1100+ languages in the project. All available checkpoints can be found on the Hugging
Face Hub: [facebook/mms-tts](https://huggingface.co/models?sort=trending&search=facebook%2Fmms-tts), and the inference
documentation under [VITS](https://huggingface.co/docs/transformers/main/en/model_doc/vits).
#### Inference
To use the MMS model, first update to the latest version of the Transformers library:
```bash
pip install --upgrade transformers accelerate
```
Since the flow-based model in VITS is non-deterministic, it is good practice to set a seed to ensure reproducibility of
the outputs.
- For languages with a Roman alphabet, such as English or French, the tokenizer can be used directly to
pre-process the text inputs. The following code example runs a forward pass using the MMS-TTS English checkpoint:
```python
import torch
from transformers import VitsTokenizer, VitsModel, set_seed
tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng")
model = VitsModel.from_pretrained("facebook/mms-tts-eng")
inputs = tokenizer(text="Hello - my dog is cute", return_tensors="pt")
set_seed(555) # make deterministic
with torch.no_grad():
outputs = model(**inputs)
waveform = outputs.waveform[0]
```
The resulting waveform can be saved as a `.wav` file:
```python
import scipy
scipy.io.wavfile.write("synthesized_speech.wav", rate=model.config.sampling_rate, data=waveform)
```
Or displayed in a Jupyter Notebook / Google Colab:
```python
from IPython.display import Audio
Audio(waveform, rate=model.config.sampling_rate)
```
For certain languages with non-Roman alphabets, such as Arabic, Mandarin or Hindi, the [`uroman`](https://github.com/isi-nlp/uroman)
perl package is required to pre-process the text inputs to the Roman alphabet.
You can check whether you require the `uroman` package for your language by inspecting the `is_uroman` attribute of
the pre-trained `tokenizer`:
```python
from transformers import VitsTokenizer
tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng")
print(tokenizer.is_uroman)
```
If required, you should apply the uroman package to your text inputs **prior** to passing them to the `VitsTokenizer`,
since currently the tokenizer does not support performing the pre-processing itself.
To do this, first clone the uroman repository to your local machine and set the bash variable `UROMAN` to the local path:
```bash
git clone https://github.com/isi-nlp/uroman.git
cd uroman
export UROMAN=$(pwd)
```
You can then pre-process the text input using the following code snippet. You can either rely on using the bash variable
`UROMAN` to point to the uroman repository, or you can pass the uroman directory as an argument to the `uromanize` function:
```python
import torch
from transformers import VitsTokenizer, VitsModel, set_seed
import os
import subprocess
tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-kor")
model = VitsModel.from_pretrained("facebook/mms-tts-kor")
def uromanize(input_string, uroman_path):
"""Convert non-Roman strings to Roman using the `uroman` perl package."""
script_path = os.path.join(uroman_path, "bin", "uroman.pl")
command = ["perl", script_path]
process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Execute the perl command
stdout, stderr = process.communicate(input=input_string.encode())
if process.returncode != 0:
raise ValueError(f"Error {process.returncode}: {stderr.decode()}")
# Return the output as a string and skip the new-line character at the end
return stdout.decode()[:-1]
text = "ìŽëŽ ë¬Žìš ìŒìŽìŒ"
uromanized_text = uromanize(text, uroman_path=os.environ["UROMAN"])
inputs = tokenizer(text=uromanized_text, return_tensors="pt")
set_seed(555) # make deterministic
with torch.no_grad():
outputs = model(inputs["input_ids"])
waveform = outputs.waveform[0]
```
**Tips:**
* The MMS-TTS checkpoints are trained on lower-cased, un-punctuated text. By default, the `VitsTokenizer` *normalizes* the inputs by removing any casing and punctuation, to avoid passing out-of-vocabulary characters to the model. Hence, the model is agnostic to casing and punctuation, so these should be avoided in the text prompt. You can disable normalisation by setting `normalize=False` in the call to the tokenizer, but this will lead to unexpected behaviour and is discouraged.
* The speaking rate can be varied by setting the attribute `model.speaking_rate` to a chosen value. Likewise, the randomness of the noise is controlled by `model.noise_scale`:
```python
import torch
from transformers import VitsTokenizer, VitsModel, set_seed
tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng")
model = VitsModel.from_pretrained("facebook/mms-tts-eng")
inputs = tokenizer(text="Hello - my dog is cute", return_tensors="pt")
# make deterministic
set_seed(555)
# make speech faster and more noisy
model.speaking_rate = 1.5
model.noise_scale = 0.8
with torch.no_grad():
outputs = model(**inputs)
```
### Language Identification (LID)
Different LID models are available based on the number of languages they can recognize - [126](https://huggingface.co/facebook/mms-lid-126), [256](https://huggingface.co/facebook/mms-lid-256), [512](https://huggingface.co/facebook/mms-lid-512), [1024](https://huggingface.co/facebook/mms-lid-1024), [2048](https://huggingface.co/facebook/mms-lid-2048), [4017](https://huggingface.co/facebook/mms-lid-4017).
#### Inference
First, we install transformers and some other libraries
```bash
pip install torch accelerate datasets[audio]
pip install --upgrade transformers
```
Next, we load a couple of audio samples via `datasets`. Make sure that the audio data is sampled at 16,000 Hz (16 kHz).
```py
from datasets import load_dataset, Audio
# English
stream_data = load_dataset("mozilla-foundation/common_voice_13_0", "en", split="test", streaming=True)
stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000))
en_sample = next(iter(stream_data))["audio"]["array"]
# Arabic
stream_data = load_dataset("mozilla-foundation/common_voice_13_0", "ar", split="test", streaming=True)
stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000))
ar_sample = next(iter(stream_data))["audio"]["array"]
```
Next, we load the model and processor
```py
from transformers import Wav2Vec2ForSequenceClassification, AutoFeatureExtractor
import torch
model_id = "facebook/mms-lid-126"
processor = AutoFeatureExtractor.from_pretrained(model_id)
model = Wav2Vec2ForSequenceClassification.from_pretrained(model_id)
```
Now we process the audio data and pass it to the model to classify it into a language, just as we usually do for Wav2Vec2 audio classification models such as [harshit345/xlsr-wav2vec-speech-emotion-recognition](https://huggingface.co/harshit345/xlsr-wav2vec-speech-emotion-recognition).
```py
# English
inputs = processor(en_sample, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
outputs = model(**inputs).logits
lang_id = torch.argmax(outputs, dim=-1)[0].item()
detected_lang = model.config.id2label[lang_id]
# 'eng'
# Arabic
inputs = processor(ar_sample, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
outputs = model(**inputs).logits
lang_id = torch.argmax(outputs, dim=-1)[0].item()
detected_lang = model.config.id2label[lang_id]
# 'ara'
```
To see all the supported languages of a checkpoint, you can print out the language ids as follows:
```py
model.config.id2label.values()
```
### Audio Pretrained Models
Pretrained models are available in two different sizes - [300M](https://huggingface.co/facebook/mms-300m) and
[1B](https://huggingface.co/facebook/mms-1b).
<Tip>
The MMS-ASR architecture is based on the Wav2Vec2 model; refer to [Wav2Vec2's documentation page](wav2vec2) for further
details on how to fine-tune the models for various downstream tasks.
MMS-TTS uses the same model architecture as VITS; refer to [VITS's documentation page](vits) for the API reference.
</Tip>
| transformers/docs/source/en/model_doc/mms.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/mms.md",
"repo_id": "transformers",
"token_count": 4924
} |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# RemBERT
## Overview
The RemBERT model was proposed in [Rethinking Embedding Coupling in Pre-trained Language Models](https://arxiv.org/abs/2010.12821) by Hyung Won Chung, Thibault Févry, Henry Tsai, Melvin Johnson, Sebastian Ruder.
The abstract from the paper is the following:
*We re-evaluate the standard practice of sharing weights between input and output embeddings in state-of-the-art
pre-trained language models. We show that decoupled embeddings provide increased modeling flexibility, allowing us to
significantly improve the efficiency of parameter allocation in the input embedding of multilingual models. By
reallocating the input embedding parameters in the Transformer layers, we achieve dramatically better performance on
standard natural language understanding tasks with the same number of parameters during fine-tuning. We also show that
allocating additional capacity to the output embedding provides benefits to the model that persist through the
fine-tuning stage even though the output embedding is discarded after pre-training. Our analysis shows that larger
output embeddings prevent the model's last layers from overspecializing to the pre-training task and encourage
Transformer representations to be more general and more transferable to other tasks and languages. Harnessing these
findings, we are able to train models that achieve strong performance on the XTREME benchmark without increasing the
number of parameters at the fine-tuning stage.*
## Usage tips
For fine-tuning, RemBERT can be thought of as a bigger version of mBERT with an ALBERT-like factorization of the
embedding layer. The embeddings are not tied in pre-training, in contrast with BERT, which enables smaller input
embeddings (preserved during fine-tuning) and bigger output embeddings (discarded at fine-tuning). The tokenizer is
also similar to the ALBERT one rather than the BERT one.
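As a quick sanity check, the sketch below shows how to extract hidden states with [`RemBertModel`]; the `google/rembert` checkpoint name is assumed here for illustration.

```python
import torch
from transformers import AutoTokenizer, RemBertModel

# checkpoint name assumed for illustration
tokenizer = AutoTokenizer.from_pretrained("google/rembert")
model = RemBertModel.from_pretrained("google/rembert")

inputs = tokenizer("RemBERT decouples the input and output embeddings.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
```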
## Resources
- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Causal language modeling task guide](../tasks/language_modeling)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)
## RemBertConfig
[[autodoc]] RemBertConfig
## RemBertTokenizer
[[autodoc]] RemBertTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## RemBertTokenizerFast
[[autodoc]] RemBertTokenizerFast
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
<frameworkcontent>
<pt>
## RemBertModel
[[autodoc]] RemBertModel
- forward
## RemBertForCausalLM
[[autodoc]] RemBertForCausalLM
- forward
## RemBertForMaskedLM
[[autodoc]] RemBertForMaskedLM
- forward
## RemBertForSequenceClassification
[[autodoc]] RemBertForSequenceClassification
- forward
## RemBertForMultipleChoice
[[autodoc]] RemBertForMultipleChoice
- forward
## RemBertForTokenClassification
[[autodoc]] RemBertForTokenClassification
- forward
## RemBertForQuestionAnswering
[[autodoc]] RemBertForQuestionAnswering
- forward
</pt>
<tf>
## TFRemBertModel
[[autodoc]] TFRemBertModel
- call
## TFRemBertForMaskedLM
[[autodoc]] TFRemBertForMaskedLM
- call
## TFRemBertForCausalLM
[[autodoc]] TFRemBertForCausalLM
- call
## TFRemBertForSequenceClassification
[[autodoc]] TFRemBertForSequenceClassification
- call
## TFRemBertForMultipleChoice
[[autodoc]] TFRemBertForMultipleChoice
- call
## TFRemBertForTokenClassification
[[autodoc]] TFRemBertForTokenClassification
- call
## TFRemBertForQuestionAnswering
[[autodoc]] TFRemBertForQuestionAnswering
- call
</tf>
</frameworkcontent>
| transformers/docs/source/en/model_doc/rembert.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/rembert.md",
"repo_id": "transformers",
"token_count": 1363
} |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# ViTMAE
## Overview
The ViTMAE model was proposed in [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377v2) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li,
Piotr Dollár, Ross Girshick. The paper shows that, by pre-training a Vision Transformer (ViT) to reconstruct pixel values for masked patches, one can get results after
fine-tuning that outperform supervised pre-training.
The abstract from the paper is the following:
*This paper shows that masked autoencoders (MAE) are scalable self-supervised learners for computer vision. Our MAE approach is simple: we mask random patches of the
input image and reconstruct the missing pixels. It is based on two core designs. First, we develop an asymmetric encoder-decoder architecture, with an encoder that operates
only on the visible subset of patches (without mask tokens), along with a lightweight decoder that reconstructs the original image from the latent representation and mask
tokens. Second, we find that masking a high proportion of the input image, e.g., 75%, yields a nontrivial and meaningful self-supervisory task. Coupling these two designs
enables us to train large models efficiently and effectively: we accelerate training (by 3x or more) and improve accuracy. Our scalable approach allows for learning high-capacity
models that generalize well: e.g., a vanilla ViT-Huge model achieves the best accuracy (87.8%) among methods that use only ImageNet-1K data. Transfer performance in downstream
tasks outperforms supervised pre-training and shows promising scaling behavior.*
<img src="https://user-images.githubusercontent.com/11435359/146857310-f258c86c-fde6-48e8-9cee-badd2b21bd2c.png"
alt="drawing" width="600"/>
<small> MAE architecture. Taken from the <a href="https://arxiv.org/abs/2111.06377">original paper.</a> </small>
This model was contributed by [nielsr](https://huggingface.co/nielsr). TensorFlow version of the model was contributed by [sayakpaul](https://github.com/sayakpaul) and
[ariG23498](https://github.com/ariG23498) (equal contribution). The original code can be found [here](https://github.com/facebookresearch/mae).
## Usage tips
- MAE (masked auto encoding) is a method for self-supervised pre-training of Vision Transformers (ViTs). The pre-training objective is relatively simple:
by masking a large portion (75%) of the image patches, the model must reconstruct raw pixel values. One can use [`ViTMAEForPreTraining`] for this purpose.
- After pre-training, one "throws away" the decoder used to reconstruct pixels, and one uses the encoder for fine-tuning/linear probing. This means that after
fine-tuning, one can directly plug in the weights into a [`ViTForImageClassification`].
- One can use [`ViTImageProcessor`] to prepare images for the model. See the code examples for more info; a minimal usage sketch is also shown right after these tips.
- Note that the encoder of MAE is only used to encode the visual patches. The encoded patches are then concatenated with mask tokens, which the decoder (which also
consists of Transformer blocks) takes as input. Each mask token is a shared, learned vector that indicates the presence of a missing patch to be predicted. Fixed
sin/cos position embeddings are added both to the input of the encoder and the decoder.
- For a visual understanding of how MAEs work you can check out this [post](https://keras.io/examples/vision/masked_image_modeling/).
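As a reference, here is a minimal sketch of a pre-training forward pass with [`ViTMAEForPreTraining`]; the checkpoint name and the example image URL are only illustrative choices.

```py
import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, ViTMAEForPreTraining

# example image and checkpoint assumed for illustration
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("facebook/vit-mae-base")
model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# the reconstruction loss is computed on the masked patches only;
# `mask` marks which patches were masked (1) and which were kept (0)
print(outputs.loss, outputs.mask.shape)
```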
### Using Scaled Dot Product Attention (SDPA)
PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function
encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the
[official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)
or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention)
page for more information.
SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set
`attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.
```py
import torch
from transformers import ViTMAEModel

model = ViTMAEModel.from_pretrained("facebook/vit-mae-base", attn_implementation="sdpa", torch_dtype=torch.float16)
...
```
For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).
On a local benchmark (A100-40GB, PyTorch 2.3.0, OS Ubuntu 22.04) with `float32` and `facebook/vit-mae-base` model, we saw the following speedups during inference.
| Batch size | Average inference time (ms), eager mode | Average inference time (ms), SDPA mode | Speedup, SDPA / Eager (x) |
|--------------|-------------------------------------------|-------------------------------------------|------------------------------|
| 1 | 11 | 6 | 1.83 |
| 2 | 8 | 6 | 1.33 |
| 4 | 8 | 6 | 1.33 |
| 8 | 8 | 6 | 1.33 |
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ViTMAE.
- [`ViTMAEForPreTraining`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining), allowing you to pre-train the model from scratch/further pre-train the model on custom data.
- A notebook that illustrates how to visualize reconstructed pixel values with [`ViTMAEForPreTraining`] can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/ViTMAE/ViT_MAE_visualization_demo.ipynb).
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
## ViTMAEConfig
[[autodoc]] ViTMAEConfig
<frameworkcontent>
<pt>
## ViTMAEModel
[[autodoc]] ViTMAEModel
- forward
## ViTMAEForPreTraining
[[autodoc]] transformers.ViTMAEForPreTraining
- forward
</pt>
<tf>
## TFViTMAEModel
[[autodoc]] TFViTMAEModel
- call
## TFViTMAEForPreTraining
[[autodoc]] transformers.TFViTMAEForPreTraining
- call
</tf>
</frameworkcontent>
| transformers/docs/source/en/model_doc/vit_mae.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/vit_mae.md",
"repo_id": "transformers",
"token_count": 2432
} |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# XLM-RoBERTa-XL
## Overview
The XLM-RoBERTa-XL model was proposed in [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.
The abstract from the paper is the following:
*Recent work has demonstrated the effectiveness of cross-lingual language model pretraining for cross-lingual understanding. In this study, we present the results of two larger multilingual masked language models, with 3.5B and 10.7B parameters. Our two new models dubbed XLM-R XL and XLM-R XXL outperform XLM-R by 1.8% and 2.4% average accuracy on XNLI. Our model also outperforms the RoBERTa-Large model on several English tasks of the GLUE benchmark by 0.3% on average while handling 99 more languages. This suggests pretrained models with larger capacity may obtain both strong performance on high-resource languages while greatly improving low-resource languages. We make our code and models publicly available.*
This model was contributed by [Soonhwan-Kwon](https://github.com/Soonhwan-Kwon) and [stefan-it](https://huggingface.co/stefan-it). The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/xlmr).
## Usage tips
XLM-RoBERTa-XL is a multilingual model trained on 100 different languages. Unlike some XLM multilingual models, it does
not require `lang` tensors to understand which language is used, and should be able to determine the correct
language from the input ids.
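As an example, the sketch below runs the fill-mask pipeline on two languages without passing any language code; the `facebook/xlm-roberta-xl` checkpoint name is assumed here, and the model is very large, so this is only an illustration.

```python
from transformers import pipeline

# checkpoint name assumed for illustration; no `lang` information is needed
unmasker = pipeline("fill-mask", model="facebook/xlm-roberta-xl")

print(unmasker("Paris is the <mask> of France."))
print(unmasker("Paris est la <mask> de la France."))
```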
## Resources
- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Causal language modeling task guide](../tasks/language_modeling)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)
## XLMRobertaXLConfig
[[autodoc]] XLMRobertaXLConfig
## XLMRobertaXLModel
[[autodoc]] XLMRobertaXLModel
- forward
## XLMRobertaXLForCausalLM
[[autodoc]] XLMRobertaXLForCausalLM
- forward
## XLMRobertaXLForMaskedLM
[[autodoc]] XLMRobertaXLForMaskedLM
- forward
## XLMRobertaXLForSequenceClassification
[[autodoc]] XLMRobertaXLForSequenceClassification
- forward
## XLMRobertaXLForMultipleChoice
[[autodoc]] XLMRobertaXLForMultipleChoice
- forward
## XLMRobertaXLForTokenClassification
[[autodoc]] XLMRobertaXLForTokenClassification
- forward
## XLMRobertaXLForQuestionAnswering
[[autodoc]] XLMRobertaXLForQuestionAnswering
- forward
| transformers/docs/source/en/model_doc/xlm-roberta-xl.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/xlm-roberta-xl.md",
"repo_id": "transformers",
"token_count": 969
} |
<!---
Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Performance and Scalability
Training large transformer models and deploying them to production present various challenges.
During training, the model may require more GPU memory than available or exhibit slow training speed. In the deployment
phase, the model can struggle to handle the required throughput in a production environment.
This documentation aims to assist you in overcoming these challenges and finding the optimal settings for your use-case.
The guides are divided into training and inference sections, as each comes with different challenges and solutions.
Within each section you'll find separate guides for different hardware configurations, such as single GPU vs. multi-GPU
for training or CPU vs. GPU for inference.
Use this document as your starting point to navigate further to the methods that match your scenario.
## Training
Training large transformer models efficiently requires an accelerator such as a GPU or TPU. The most common case is where
you have a single GPU. The methods that you can apply to improve training efficiency on a single GPU extend to other setups
such as multiple GPUs. However, there are also techniques that are specific to multi-GPU or CPU training. We cover them in
separate sections.
* [Methods and tools for efficient training on a single GPU](perf_train_gpu_one): start here to learn common approaches that can help optimize GPU memory utilization, speed up the training, or both.
* [Multi-GPU training section](perf_train_gpu_many): explore this section to learn about further optimization methods that apply to multi-GPU settings, such as data, tensor, and pipeline parallelism.
* [CPU training section](perf_train_cpu): learn about mixed precision training on CPU.
* [Efficient Training on Multiple CPUs](perf_train_cpu_many): learn about distributed CPU training.
* [Training on TPU with TensorFlow](perf_train_tpu_tf): if you are new to TPUs, refer to this section for an opinionated introduction to training on TPUs and using XLA.
* [Custom hardware for training](perf_hardware): find tips and tricks when building your own deep learning rig.
* [Hyperparameter Search using Trainer API](hpo_train)
## Inference
Efficient inference with large models in a production environment can be as challenging as training them. In the following
sections we go through the steps to run inference on CPU and single/multi-GPU setups.
* [Inference on a single CPU](perf_infer_cpu)
* [Inference on a single GPU](perf_infer_gpu_one)
* [Multi-GPU inference](perf_infer_gpu_multi)
* [XLA Integration for TensorFlow Models](tf_xla)
## Training and inference
Here you'll find techniques, tips and tricks that apply whether you are training a model, or running inference with it.
* [Instantiating a big model](big_models)
* [Troubleshooting performance issues](debugging)
## Contribute
This document is far from complete and a lot more needs to be added, so if you have additions or corrections to make,
please don't hesitate to open a PR, or if you aren't sure, start an Issue and we can discuss the details there.
When making contributions claiming that A is better than B, please try to include a reproducible benchmark and/or a link to the
source of that information (unless it comes directly from you).
| transformers/docs/source/en/performance.md/0 | {
"file_path": "transformers/docs/source/en/performance.md",
"repo_id": "transformers",
"token_count": 966
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# HIGGS
HIGGS is a 0-shot quantization algorithm that combines Hadamard preprocessing with MSE-Optimal quantization grids to achieve lower quantization error and SOTA performance. You can find more information in the paper [arxiv.org/abs/2411.17525](https://arxiv.org/abs/2411.17525).
Runtime support for HIGGS is implemented through [FLUTE](https://arxiv.org/abs/2407.10960) and its [library](https://github.com/HanGuo97/flute).
## Quantization Example
```python
from transformers import AutoModelForCausalLM, AutoTokenizer, HiggsConfig
model = AutoModelForCausalLM.from_pretrained(
"google/gemma-2-9b-it",
quantization_config=HiggsConfig(bits=4),
device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b-it")
tokenizer.decode(model.generate(
**tokenizer("Hi,", return_tensors="pt").to(model.device),
temperature=0.5,
top_p=0.80,
)[0])
```
## Pre-quantized models
Some pre-quantized models can be found in the [official collection](https://huggingface.co/collections/ISTA-DASLab/higgs-675308e432fd56b7f6dab94e) on Hugging Face Hub.
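Loading one of these pre-quantized checkpoints should work like loading any other model, since the quantization configuration is stored with the checkpoint; the repository name below is a hypothetical example for illustration only.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# hypothetical repository id from the collection, used for illustration only
repo_id = "ISTA-DASLab/Llama-3.1-8B-Instruct-HIGGS-4bit"

model = AutoModelForCausalLM.from_pretrained(repo_id, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(repo_id)
```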
## Current Limitations
**Architectures**
Currently, FLUTE, and HIGGS by extension, **only support Llama 3.0 and 3.1 models with 8B, 70B and 405B parameters, as well as Gemma-2 9B and 27B**. We're working on supporting a more diverse set of models, as well as on allowing arbitrary models by modifying the FLUTE compilation procedure.
**torch.compile**
HIGGS is fully compatible with `torch.compile`. Compiling `model.forward`, as described [here](../perf_torch_compile.md), yields the following speedups on an RTX 4090 for `Llama-3.1-8B-Instruct` (forward passes/sec):
| Batch Size | BF16 (With `torch.compile`) | HIGGS 4bit (No `torch.compile`) | HIGGS 4bit (With `torch.compile`) |
|------------|-----------------------------|----------------------------------|-----------------------------------|
| 1 | 59 | 41 | 124 |
| 4 | 57 | 42 | 123 |
| 16 | 56 | 41 | 120 |
**Quantized training**
Currently, HIGGS doesn't support quantized training (and backward passes in general). We're working on adding support for it. | transformers/docs/source/en/quantization/higgs.md/0 | {
"file_path": "transformers/docs/source/en/quantization/higgs.md",
"repo_id": "transformers",
"token_count": 1149
} |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Image captioning
[[open-in-colab]]
Image captioning is the task of predicting a caption for a given image. Common real world applications of it include
aiding visually impaired people that can help them navigate through different situations. Therefore, image captioning
helps to improve content accessibility for people by describing images to them.
This guide will show you how to:
* Fine-tune an image captioning model.
* Use the fine-tuned model for inference.
Before you begin, make sure you have all the necessary libraries installed:
```bash
pip install transformers datasets evaluate -q
pip install jiwer -q
```
We encourage you to log in to your Hugging Face account so you can upload and share your model with the community. When prompted, enter your token to log in:
```python
from huggingface_hub import notebook_login
notebook_login()
```
## Load the Pokémon BLIP captions dataset
Use the 🤗 Datasets library to load a dataset that consists of {image-caption} pairs. To create your own image captioning dataset
in PyTorch, you can follow [this notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/GIT/Fine_tune_GIT_on_an_image_captioning_dataset.ipynb).
```python
from datasets import load_dataset
ds = load_dataset("lambdalabs/pokemon-blip-captions")
ds
```
```bash
DatasetDict({
train: Dataset({
features: ['image', 'text'],
num_rows: 833
})
})
```
The dataset has two features, `image` and `text`.
<Tip>
Many image captioning datasets contain multiple captions per image. In those cases, a common strategy is to randomly sample a caption amongst the available ones during training.
</Tip>
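For instance, if a dataset stored several captions per image in a hypothetical `captions` column (the dataset used in this guide only has a single `text` caption), a minimal sketch of such random sampling could look like this:

```python
import random

def sample_one_caption(example_batch):
    # pick one caption at random for every image in the batch;
    # the "captions" column name is assumed for illustration
    example_batch["text"] = [random.choice(captions) for captions in example_batch["captions"]]
    return example_batch
```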
Split the dataset's train split into a train and test set with the [`~datasets.Dataset.train_test_split`] method:
```python
ds = ds["train"].train_test_split(test_size=0.1)
train_ds = ds["train"]
test_ds = ds["test"]
```
Let's visualize a couple of samples from the training set.
```python
from textwrap import wrap
import matplotlib.pyplot as plt
import numpy as np
def plot_images(images, captions):
plt.figure(figsize=(20, 20))
for i in range(len(images)):
ax = plt.subplot(1, len(images), i + 1)
caption = captions[i]
caption = "\n".join(wrap(caption, 12))
plt.title(caption)
plt.imshow(images[i])
plt.axis("off")
sample_images_to_visualize = [np.array(train_ds[i]["image"]) for i in range(5)]
sample_captions = [train_ds[i]["text"] for i in range(5)]
plot_images(sample_images_to_visualize, sample_captions)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/sample_training_images_image_cap.png" alt="Sample training images"/>
</div>
## Preprocess the dataset
Since the dataset has two modalities (image and text), the pre-processing pipeline will preprocess images and the captions.
To do so, load the processor class associated with the model you are about to fine-tune.
```python
from transformers import AutoProcessor
checkpoint = "microsoft/git-base"
processor = AutoProcessor.from_pretrained(checkpoint)
```
The processor will internally pre-process the image (which includes resizing, and pixel scaling) and tokenize the caption.
```python
def transforms(example_batch):
images = [x for x in example_batch["image"]]
captions = [x for x in example_batch["text"]]
inputs = processor(images=images, text=captions, padding="max_length")
inputs.update({"labels": inputs["input_ids"]})
return inputs
train_ds.set_transform(transforms)
test_ds.set_transform(transforms)
```
With the dataset ready, you can now set up the model for fine-tuning.
## Load a base model
Load the ["microsoft/git-base"](https://huggingface.co/microsoft/git-base) into a [`AutoModelForCausalLM`](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoModelForCausalLM) object.
```python
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained(checkpoint)
```
## Evaluate
Image captioning models are typically evaluated with the [Rouge Score](https://huggingface.co/spaces/evaluate-metric/rouge) or [Word Error Rate](https://huggingface.co/spaces/evaluate-metric/wer). For this guide, you will use the Word Error Rate (WER).
We use the 🤗 Evaluate library to do so. For potential limitations and other gotchas of the WER, refer to [this guide](https://huggingface.co/spaces/evaluate-metric/wer).
```python
from evaluate import load
import torch
wer = load("wer")
def compute_metrics(eval_pred):
logits, labels = eval_pred
predicted = logits.argmax(-1)
decoded_labels = processor.batch_decode(labels, skip_special_tokens=True)
decoded_predictions = processor.batch_decode(predicted, skip_special_tokens=True)
wer_score = wer.compute(predictions=decoded_predictions, references=decoded_labels)
return {"wer_score": wer_score}
```
## Train!
Now, you are ready to start fine-tuning the model. You will use the 🤗 [`Trainer`] for this.
First, define the training arguments using [`TrainingArguments`].
```python
from transformers import TrainingArguments, Trainer
model_name = checkpoint.split("/")[1]
training_args = TrainingArguments(
output_dir=f"{model_name}-pokemon",
learning_rate=5e-5,
num_train_epochs=50,
fp16=True,
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
gradient_accumulation_steps=2,
save_total_limit=3,
eval_strategy="steps",
eval_steps=50,
save_strategy="steps",
save_steps=50,
logging_steps=50,
remove_unused_columns=False,
push_to_hub=True,
label_names=["labels"],
load_best_model_at_end=True,
)
```
Then pass them along with the datasets and the model to the 🤗 Trainer.
```python
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_ds,
eval_dataset=test_ds,
compute_metrics=compute_metrics,
)
```
To start training, simply call [`~Trainer.train`] on the [`Trainer`] object.
```python
trainer.train()
```
You should see the training loss drop smoothly as training progresses.
Once training is completed, share your model to the Hub with the [`~Trainer.push_to_hub`] method so everyone can use your model:
```python
trainer.push_to_hub()
```
## Inference
Take a sample image from `test_ds` to test the model.
```python
from PIL import Image
import requests
url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png"
image = Image.open(requests.get(url, stream=True).raw)
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/test_image_image_cap.png" alt="Test image"/>
</div>
Prepare the image for the model.
```python
from accelerate.test_utils.testing import get_backend
# automatically detects the underlying device type (CUDA, CPU, XPU, MPS, etc.)
device, _, _ = get_backend()
inputs = processor(images=image, return_tensors="pt").to(device)
pixel_values = inputs.pixel_values
```
Call [`generate`] and decode the predictions.
```python
generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(generated_caption)
```
```bash
a drawing of a pink and blue pokemon
```
Looks like the fine-tuned model generated a pretty good caption!
| transformers/docs/source/en/tasks/image_captioning.md/0 | {
"file_path": "transformers/docs/source/en/tasks/image_captioning.md",
"repo_id": "transformers",
"token_count": 2730
} |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Export to TorchScript
<Tip>
This is the very beginning of our experiments with TorchScript and we are still
exploring its capabilities with variable-input-size models. It is a focus of interest to
us and we will deepen our analysis in upcoming releases, with more code examples, a more
flexible implementation, and benchmarks comparing Python-based codes with compiled
TorchScript.
</Tip>
According to the [TorchScript documentation](https://pytorch.org/docs/stable/jit.html):
> TorchScript is a way to create serializable and optimizable models from PyTorch code.
There are two PyTorch modules, [JIT and
TRACE](https://pytorch.org/docs/stable/jit.html), that allow developers to export their
models to be reused in other programs like efficiency-oriented C++ programs.
We provide an interface that allows you to export 🤗 Transformers models to TorchScript
so they can be reused in a different environment than PyTorch-based Python programs.
Here, we explain how to export and use our models using TorchScript.
Exporting a model requires two things:
- model instantiation with the `torchscript` flag
- a forward pass with dummy inputs
These necessities imply several things developers should be careful about as detailed
below.
## TorchScript flag and tied weights
The `torchscript` flag is necessary because most of the 🤗 Transformers language models
have tied weights between their `Embedding` layer and their `Decoding` layer.
TorchScript does not allow you to export models that have tied weights, so it is
necessary to untie and clone the weights beforehand.
Models instantiated with the `torchscript` flag have their `Embedding` layer and
`Decoding` layer separated, which means that they should not be trained down the line.
Training would desynchronize the two layers, leading to unexpected results.
This is not the case for models that do not have a language model head, as those do not
have tied weights. These models can be safely exported without the `torchscript` flag.
## Dummy inputs and standard lengths
The dummy inputs are used for a model's forward pass. While the inputs' values are
propagated through the layers, PyTorch keeps track of the different operations executed
on each tensor. These recorded operations are then used to create the *trace* of the
model.
The trace is created relative to the inputs' dimensions. It is therefore constrained by
the dimensions of the dummy input, and will not work for any other sequence length or
batch size. When trying with a different size, the following error is raised:
```
`The expanded size of the tensor (3) must match the existing size (7) at non-singleton dimension 2`
```
We recommend you trace the model with a dummy input size at least as large as the
largest input that will be fed to the model during inference. Padding can help fill the
missing values. However, since the model is traced with a larger input size, the
dimensions of the matrix will also be large, resulting in more calculations.
Be careful of the total number of operations done on each input and follow the
performance closely when exporting varying sequence-length models.
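For instance, a dummy input padded to a fixed maximum length could be built like this (the checkpoint and the length of 128 are illustrative choices):

```python
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")

# pad the dummy input to the largest sequence length expected at inference time
dummy = tokenizer(
    "A short example sentence.",
    padding="max_length",
    max_length=128,
    return_tensors="pt",
)
# both tensors now have shape (1, 128); real inputs must be padded the same way at inference time
print(dummy["input_ids"].shape, dummy["attention_mask"].shape)
```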
## Using TorchScript in Python
This section demonstrates how to save and load models as well as how to use the trace
for inference.
### Saving a model
To export a `BertModel` with TorchScript, instantiate `BertModel` from the `BertConfig`
class and then save it to disk under the filename `traced_bert.pt`:
```python
from transformers import BertModel, BertTokenizer, BertConfig
import torch
enc = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
# Tokenizing input text
text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
tokenized_text = enc.tokenize(text)
# Masking one of the input tokens
masked_index = 8
tokenized_text[masked_index] = "[MASK]"
indexed_tokens = enc.convert_tokens_to_ids(tokenized_text)
segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
# Creating a dummy input
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
dummy_input = [tokens_tensor, segments_tensors]
# Initializing the model with the torchscript flag
# Flag set to True even though it is not necessary as this model does not have an LM Head.
config = BertConfig(
vocab_size_or_config_json_file=32000,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
torchscript=True,
)
# Instantiating the model
model = BertModel(config)
# The model needs to be in evaluation mode
model.eval()
# If you are instantiating the model with *from_pretrained* you can also easily set the TorchScript flag
model = BertModel.from_pretrained("google-bert/bert-base-uncased", torchscript=True)
# Creating the trace
traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors])
torch.jit.save(traced_model, "traced_bert.pt")
```
### Loading a model
Now you can load the previously saved `BertModel`, `traced_bert.pt`, from disk and use
it on the previously initialised `dummy_input`:
```python
loaded_model = torch.jit.load("traced_bert.pt")
loaded_model.eval()
all_encoder_layers, pooled_output = loaded_model(*dummy_input)
```
### Using a traced model for inference
Use the traced model for inference by using its `__call__` dunder method:
```python
traced_model(tokens_tensor, segments_tensors)
```
## Deploy Hugging Face TorchScript models to AWS with the Neuron SDK
AWS introduced the [Amazon EC2 Inf1](https://aws.amazon.com/ec2/instance-types/inf1/)
instance family for low cost, high performance machine learning inference in the cloud.
The Inf1 instances are powered by the AWS Inferentia chip, a custom-built hardware
accelerator, specializing in deep learning inferencing workloads. [AWS
Neuron](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/#) is the SDK for
Inferentia that supports tracing and optimizing transformers models for deployment on
Inf1. The Neuron SDK provides:
1. Easy-to-use API with one line of code change to trace and optimize a TorchScript
model for inference in the cloud.
2. Out of the box performance optimizations for [improved
cost-performance](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/benchmark/).
3. Support for Hugging Face transformers models built with either
[PyTorch](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/bert_tutorial/tutorial_pretrained_bert.html)
or
[TensorFlow](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/tensorflow/huggingface_bert/huggingface_bert.html).
### Implications
Transformers models based on the [BERT (Bidirectional Encoder Representations from
Transformers)](https://huggingface.co/docs/transformers/main/model_doc/bert)
architecture, or its variants such as
[distilBERT](https://huggingface.co/docs/transformers/main/model_doc/distilbert) and
[roBERTa](https://huggingface.co/docs/transformers/main/model_doc/roberta) run best on
Inf1 for non-generative tasks such as extractive question answering, sequence
classification, and token classification. However, text generation tasks can still be
adapted to run on Inf1 according to this [AWS Neuron MarianMT
tutorial](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/transformers-marianmt.html).
More information about models that can be converted out of the box on Inferentia can be
found in the [Model Architecture
Fit](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/models/models-inferentia.html#models-inferentia)
section of the Neuron documentation.
### Dependencies
Using AWS Neuron to convert models requires a [Neuron SDK
environment](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/neuron-frameworks/pytorch-neuron/index.html#installation-guide)
which comes preconfigured on [AWS Deep Learning
AMI](https://docs.aws.amazon.com/dlami/latest/devguide/tutorial-inferentia-launching.html).
### Converting a model for AWS Neuron
Convert a model for AWS Neuron using the same code from [Using TorchScript in
Python](torchscript#using-torchscript-in-python) to trace a `BertModel`. Import the
`torch.neuron` framework extension to access the components of the Neuron SDK through a
Python API:
```python
from transformers import BertModel, BertTokenizer, BertConfig
import torch
import torch.neuron
```
You only need to modify the following line:
```diff
- torch.jit.trace(model, [tokens_tensor, segments_tensors])
+ torch.neuron.trace(model, [tokens_tensor, segments_tensors])
```
This enables the Neuron SDK to trace the model and optimize it for Inf1 instances.
To learn more about AWS Neuron SDK features, tools, example tutorials and latest
updates, please see the [AWS NeuronSDK
documentation](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/index.html).
| transformers/docs/source/en/torchscript.md/0 | {
"file_path": "transformers/docs/source/en/torchscript.md",
"repo_id": "transformers",
"token_count": 2742
} |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Debugging
## Multi-GPU Network Issues Debug
When training or running inference with `DistributedDataParallel` and multiple GPUs, if you run into problems of intercommunication between processes and/or nodes, you can use the following script to diagnose network issues.
```bash
wget https://raw.githubusercontent.com/huggingface/transformers/main/scripts/distributed/torch-distributed-gpu-test.py
```
For example, to test how 2 GPUs interact, do the following:
```bash
python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
```
If both processes can talk to each other and allocate GPU memory, each one will print an OK status.
For more GPUs or nodes, adjust the arguments in the script.
You will find a lot more details inside the diagnostics script and even a recipe on how to run it in a SLURM environment.
An additional level of debug is to add the `NCCL_DEBUG=INFO` environment variable as follows:
```bash
NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
```
This will dump a lot of NCCL-related debug information, which you can then search online if you find that some problems are reported. Or if you're not sure how to interpret the output, you can share the log file in an Issue.
## Underflow and Overflow Detection
<Tip>
This feature is currently available for PyTorch only.
</Tip>
<Tip>
For multi-GPU training, it requires DDP (`torch.distributed.launch`).
</Tip>
<Tip>
This feature can be used with any `nn.Module`-based model.
</Tip>
If you start getting `loss=NaN` or the model exhibits some other abnormal behavior due to `inf` or `nan` in
activations or weights, one needs to discover where the first underflow or overflow happens and what led to it. Luckily
you can accomplish that easily by activating a special module that will do the detection automatically.
If you're using [`Trainer`], you just need to add:
```bash
--debug underflow_overflow
```
to the normal command line arguments, or pass `debug="underflow_overflow"` when creating the [`TrainingArguments`] object.
If you're using your own training loop or another Trainer, you can accomplish the same with:
```python
from .debug_utils import DebugUnderflowOverflow
debug_overflow = DebugUnderflowOverflow(model)
```
[`~debug_utils.DebugUnderflowOverflow`] inserts hooks into the model that, immediately after each forward call,
will test the input and output variables and also the weights of the corresponding module. As soon as `inf` or
`nan` is detected in at least one element of the activations or weights, the program will assert and print a report
like this (this was caught with `google/mt5-small` under fp16 mixed precision):
```
Detected inf/nan during batch_number=0
Last 21 forward frames:
abs min abs max metadata
encoder.block.1.layer.1.DenseReluDense.dropout Dropout
0.00e+00 2.57e+02 input[0]
0.00e+00 2.85e+02 output
[...]
encoder.block.2.layer.0 T5LayerSelfAttention
6.78e-04 3.15e+03 input[0]
2.65e-04 3.42e+03 output[0]
None output[1]
2.25e-01 1.00e+04 output[2]
encoder.block.2.layer.1.layer_norm T5LayerNorm
8.69e-02 4.18e-01 weight
2.65e-04 3.42e+03 input[0]
1.79e-06 4.65e+00 output
encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
2.17e-07 4.50e+00 weight
1.79e-06 4.65e+00 input[0]
2.68e-06 3.70e+01 output
encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
8.08e-07 2.66e+01 weight
1.79e-06 4.65e+00 input[0]
1.27e-04 2.37e+02 output
encoder.block.2.layer.1.DenseReluDense.dropout Dropout
0.00e+00 8.76e+03 input[0]
0.00e+00 9.74e+03 output
encoder.block.2.layer.1.DenseReluDense.wo Linear
1.01e-06 6.44e+00 weight
0.00e+00 9.74e+03 input[0]
3.18e-04 6.27e+04 output
encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
1.79e-06 4.65e+00 input[0]
3.18e-04 6.27e+04 output
encoder.block.2.layer.1.dropout Dropout
3.18e-04 6.27e+04 input[0]
0.00e+00 inf output
```
The example output was trimmed in the middle for brevity.
The second column shows the value of the absolute largest element, so if you have a close look at the last few frames,
the inputs and outputs were in the range of `1e4`. So when this training was done with fp16 mixed precision,
the very last step overflowed (since under `fp16` the largest number before `inf` is `64e3`). To avoid overflows under
`fp16` the activations must remain way below `1e4`, because `1e4 * 1e4 = 1e8`, so any matrix multiplication with
large activations is going to lead to a numerical overflow condition.
At the very start of the output you can discover at which batch number the problem occurred (here `Detected inf/nan during batch_number=0` means the problem occurred on the first batch).
Each reported frame starts by declaring the fully qualified entry for the corresponding module this frame is reporting for.
If we look just at this frame:
```
encoder.block.2.layer.1.layer_norm T5LayerNorm
8.69e-02 4.18e-01 weight
2.65e-04 3.42e+03 input[0]
1.79e-06 4.65e+00 output
```
Here, `encoder.block.2.layer.1.layer_norm` indicates that it was a layer norm for the first layer of the second
block of the encoder. And the specific call of the `forward` is `T5LayerNorm`.
Let's look at the last few frames of that report:
```
Detected inf/nan during batch_number=0
Last 21 forward frames:
abs min abs max metadata
[...]
encoder.block.2.layer.1.DenseReluDense.wi_0 Linear
2.17e-07 4.50e+00 weight
1.79e-06 4.65e+00 input[0]
2.68e-06 3.70e+01 output
encoder.block.2.layer.1.DenseReluDense.wi_1 Linear
8.08e-07 2.66e+01 weight
1.79e-06 4.65e+00 input[0]
1.27e-04 2.37e+02 output
encoder.block.2.layer.1.DenseReluDense.wo Linear
1.01e-06 6.44e+00 weight
0.00e+00 9.74e+03 input[0]
3.18e-04 6.27e+04 output
encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense
1.79e-06 4.65e+00 input[0]
3.18e-04 6.27e+04 output
encoder.block.2.layer.1.dropout Dropout
3.18e-04 6.27e+04 input[0]
0.00e+00 inf output
```
The last frame reports for the `Dropout.forward` function, with the first entry for the only input and the second for the
only output. You can see that it was called from a `dropout` attribute inside the `DenseReluDense` class. We can see
that it happened during the first layer, of the second block, during the very first batch. Finally, the absolute largest
input elements were `6.27e+04` and the same for the output was `inf`.
You can see here that `T5DenseGatedGeluDense.forward` resulted in output activations whose absolute maximum value was
around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout`, which renormalizes
the weights, after zeroing out some of the elements, which pushes the absolute maximum value beyond 64K, and we get an
overflow (`inf`).
As you can see, it's the previous frames that we need to look at when the numbers start getting very large for fp16 numbers.
Let's match the report to the code from `models/t5/modeling_t5.py`:
```python
class T5DenseGatedGeluDense(nn.Module):
def __init__(self, config):
super().__init__()
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.gelu_act = ACT2FN["gelu_new"]
def forward(self, hidden_states):
hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
```
Now it's easy to see the `dropout` call, and all the previous calls as well.
Since the detection happens in a forward hook, these reports are printed immediately after each `forward`
returns.
Going back to the full report, to act on it and fix the problem, we need to go a few frames up to where the numbers
started to grow and most likely switch to `fp32` mode here, so that the numbers don't overflow when multiplied
or summed up. Of course, there might be other solutions. For example, we could turn off `amp` temporarily if it's
enabled, after moving the original `forward` into a helper wrapper, like so:
```python
def _forward(self, hidden_states):
hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
import torch
def forward(self, hidden_states):
if torch.is_autocast_enabled():
with torch.cuda.amp.autocast(enabled=False):
return self._forward(hidden_states)
else:
return self._forward(hidden_states)
```
Como el detector automático sólo informa de los inputs y outputs de los frames completos, una vez que sepas dónde buscar, puedes
analizar también las etapas intermedias de una función `forward` específica. En este caso, puedes utilizar la
función de ayuda `detect_overflow` para inyectar el detector donde quieras, por ejemplo:
```python
from debug_utils import detect_overflow
class T5LayerFF(nn.Module):
[...]
def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
detect_overflow(forwarded_states, "after layer_norm")
forwarded_states = self.DenseReluDense(forwarded_states)
detect_overflow(forwarded_states, "after DenseReluDense")
return hidden_states + self.dropout(forwarded_states)
```
Puedes ver que hemos añadido 2 de estos y que ahora se rastrea si se detectó algún `inf` o `nan` para `forwarded_states`
en algún punto intermedio.
De hecho, el detector ya informa de esto porque cada una de las llamadas en el ejemplo anterior es un `nn.Module`, pero
digamos que si tuvieras algunos cálculos directos locales, así es como lo harías.
Además, si estás instanciando el debugger en tu propio código, puedes ajustar el número de frames impresos de
su valor por defecto, por ejemplo:
```python
from .debug_utils import DebugUnderflowOverflow
debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)
```
### Rastreo de valores mínimos y máximos absolutos de batches específicos
La misma clase de debugging se puede utilizar para el rastreo por batches con la función de detección de underflow/overflow desactivada.
Digamos que quieres ver los valores mínimos y máximos absolutos de todos los ingredientes de cada call `forward` de un determinado
batch, y sólo hacerlo para los batches 1 y 3. Entonces instancias esta clase como:
```python
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3])
```
Y ahora los batches 1 y 3 completos serán rastreados usando el mismo formato que el detector de underflow/overflow.
Los batches están indexados desde 0.
Esto es muy útil si sabes que el programa empieza a comportarse mal después de un determinado número de batch, para que puedas avanzar rápidamente
hasta esa área. Aquí hay un ejemplo de output recortado para tal configuración:
```
*** Starting batch number=1 ***
abs min abs max metadata
shared Embedding
1.01e-06 7.92e+02 weight
0.00e+00 2.47e+04 input[0]
5.36e-05 7.92e+02 output
[...]
decoder.dropout Dropout
1.60e-07 2.27e+01 input[0]
0.00e+00 2.52e+01 output
decoder T5Stack
not a tensor output
lm_head Linear
1.01e-06 7.92e+02 weight
0.00e+00 1.11e+00 input[0]
6.06e-02 8.39e+01 output
T5ForConditionalGeneration
not a tensor output
*** Starting batch number=3 ***
abs min abs max metadata
shared Embedding
1.01e-06 7.92e+02 weight
0.00e+00 2.78e+04 input[0]
5.36e-05 7.92e+02 output
[...]
```
Aquí obtendrás un gran número de frames mostrados, tantos como forward calls haya en tu modelo, por lo que puede o no ser lo que quieras, pero a veces puede ser más fácil de usar para debug que un debugger normal.
Por ejemplo, si un problema comienza a ocurrir en el batch 150, puedes mostrar las trazas de los batches 149 y 150 y comparar dónde
los números empezaron a divergir.
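Siguiendo el mismo patrón de arriba, la configuración para ese caso sería, por ejemplo:
```python
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[149, 150])
```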
También puedes especificar el número de batch después del cual se debe detener el entrenamiento, con:
```python
debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)
```
| transformers/docs/source/es/debugging.md/0 | {
"file_path": "transformers/docs/source/es/debugging.md",
"repo_id": "transformers",
"token_count": 5532
} |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Tour rápido
[[open-in-colab]]
¡Entra en marcha con los 🤗 Transformers! Comienza usando [`pipeline`] para una inferencia veloz, carga un modelo preentrenado y un tokenizador con una [AutoClass](./model_doc/auto) para resolver tu tarea de texto, visión o audio.
<Tip>
Todos los ejemplos de código presentados en la documentación tienen un botón arriba a la derecha para elegir si quieres ocultar o mostrar el código en Pytorch o TensorFlow.
Si no fuese así, se espera que el código funcione para ambos backends sin ningún cambio.
</Tip>
## Pipeline
[`pipeline`] es la forma más fácil de usar un modelo preentrenado para una tarea dada.
<Youtube id="tiZFewofSLM"/>
El [`pipeline`] soporta muchas tareas comunes listas para usar:
**Texto**:
* Análisis de Sentimiento (Sentiment Analysis, en inglés): clasifica la polaridad de un texto dado.
* Generación de Texto (Text Generation, en inglés): genera texto a partir de un input dado.
* Reconocimiento de Entidades (Name Entity Recognition o NER, en inglés): etiqueta cada palabra con la entidad que representa (persona, fecha, ubicación, etc.).
* Responder Preguntas (Question answering, en inglés): extrae la respuesta del contexto dado un contexto y una pregunta.
* Rellenar Máscara (Fill-mask, en inglés): rellena el espacio faltante dado un texto con palabras enmascaradas.
* Resumir (Summarization, en inglés): genera un resumen de una secuencia larga de texto o un documento.
* Traducción (Translation, en inglés): traduce un texto a otro idioma.
* Extracción de Características (Feature Extraction, en inglés): crea una representación tensorial del texto.
**Imagen**:
* Clasificación de Imágenes (Image Classification, en inglés): clasifica una imagen.
* Segmentación de Imágenes (Image Segmentation, en inglés): clasifica cada pixel de una imagen.
* Detección de Objetos (Object Detection, en inglés): detecta objetos dentro de una imagen.
**Audio**:
* Clasificación de Audios (Audio Classification, en inglés): asigna una etiqueta a un segmento de audio.
* Reconocimiento de Voz Automático (Automatic Speech Recognition o ASR, en inglés): transcribe datos de audio a un texto.
<Tip>
Para más detalles acerca del [`pipeline`] y tareas asociadas, consulta la documentación [aquí](./main_classes/pipelines).
</Tip>
### Uso del Pipeline
En el siguiente ejemplo, usarás el [`pipeline`] para análisis de sentimiento.
Instala las siguientes dependencias si aún no lo has hecho:
<frameworkcontent>
<pt>
```bash
pip install torch
```
</pt>
<tf>
```bash
pip install tensorflow
```
</tf>
</frameworkcontent>
Importa [`pipeline`] y especifica la tarea que deseas completar:
```py
>>> from transformers import pipeline
>>> clasificador = pipeline("sentiment-analysis", model="pysentimiento/robertuito-sentiment-analysis")
```
El pipeline descarga y almacena en caché el [modelo preentrenado](https://huggingface.co/pysentimiento/robertuito-sentiment-analysis) y el tokenizador para análisis de sentimiento. Si no hubiéramos elegido un modelo, el pipeline habría elegido uno por defecto. Ahora puedes usar `clasificador` en tu texto objetivo:
```py
>>> clasificador("Estamos muy felices de mostrarte la biblioteca de 🤗 Transformers.")
[{'label': 'POS', 'score': 0.9320}]
```
Para más de un enunciado, entrega una lista al [`pipeline`] que devolverá una lista de diccionarios:
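Por ejemplo, con el `clasificador` de arriba (las frases de entrada son solo ilustrativas):
```py
>>> resultados = clasificador(
...     ["Estamos muy felices de mostrarte la biblioteca de 🤗 Transformers.", "Esperamos que no la odies."]
... )
>>> for resultado in resultados:
...     print(f"label: {resultado['label']}, score: {round(resultado['score'], 4)}")
```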
El [`pipeline`] también puede iterar sobre un dataset entero. Comienza instalando la biblioteca [🤗 Datasets](https://huggingface.co/docs/datasets/):
```bash
pip install datasets
```
Crea un [`pipeline`] con la tarea que deseas resolver y el modelo que quieres usar. Coloca el parámetro `device` a `0` para poner los tensores en un dispositivo CUDA:
```py
>>> import torch
>>> from transformers import pipeline
>>> reconocedor_de_voz = pipeline(
... "automatic-speech-recognition", model="jonatasgrosman/wav2vec2-large-xlsr-53-spanish", device=0
... )
```
A continuación, carga el dataset (ve 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart.html) para más detalles) sobre el que quisieras iterar. Por ejemplo, vamos a cargar el dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14):
```py
>>> from datasets import load_dataset, Audio
>>> dataset = load_dataset("PolyAI/minds14", name="es-ES", split="train") # doctest: +IGNORE_RESULT
```
Debemos asegurarnos de que la frecuencia de muestreo del conjunto de datos coincide con la frecuencia de muestreo con la que se entrenó `jonatasgrosman/wav2vec2-large-xlsr-53-spanish`.
```py
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=reconocedor_de_voz.feature_extractor.sampling_rate))
```
Los archivos de audio se cargan y remuestrean automáticamente cuando llamamos a la columna `"audio"`.
Extraigamos las matrices de onda cruda (raw waveform, en inglés) de las primeras 4 muestras y pasémoslas como una lista al pipeline:
```py
>>> resultado = reconocedor_de_voz(dataset[:4]["audio"])
>>> print([d["text"] for d in resultado])
['ahora buenas eh a ver tengo un problema con vuestra aplicación resulta que que quiero hacer una transferencia bancaria a una cuenta conocida pero me da error la aplicación a ver que a ver que puede ser', 'la aplicación no cargue saldo de mi nueva cuenta', 'hola tengo un problema con la aplicación no carga y y tampoco veo que carga el saldo de mi cuenta nueva dice que la aplicación está siendo reparada y ahora no puedo acceder a mi cuenta no necesito inmediatamente', 'hora buena la aplicación no se carga la vida no carga el saldo de mi cuenta nueva dice que la villadenta siendo reparada y oro no puedo hacer a mi cuenta']
```
Para un dataset más grande, donde los inputs son de mayor tamaño (como en habla/audio o visión), querrás pasar un generador en lugar de una lista que carga todos los inputs en memoria. Ve la [documentación del pipeline](./main_classes/pipelines) para más información.
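Por ejemplo, un boceto orientativo (asumiendo el helper `KeyDataset` disponible en `transformers.pipelines.pt_utils`) para iterar sobre la columna de audio sin cargar todos los inputs en memoria:
```py
>>> from transformers.pipelines.pt_utils import KeyDataset

>>> # Itera sobre la columna "audio" sin materializar todo el dataset en memoria
>>> for salida in reconocedor_de_voz(KeyDataset(dataset, "audio")):
...     print(salida["text"])
```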
### Usa otro modelo y otro tokenizador en el pipeline
El [`pipeline`] puede acomodarse a cualquier modelo del [Model Hub](https://huggingface.co/models) haciendo más fácil adaptar el [`pipeline`] para otros casos de uso. Por ejemplo, si quisieras un modelo capaz de manejar texto en francés, usa los tags en el Model Hub para filtrar entre los modelos apropiados. El resultado mejor filtrado devuelve un [modelo BERT](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) multilingual fine-tuned para el análisis de sentimiento. Genial, ¡vamos a usar este modelo!
```py
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
```
<frameworkcontent>
<pt>
Usa [`AutoModelForSequenceClassification`] y [`AutoTokenizer`] para cargar un modelo preentrenado y un tokenizador asociado (más en un `AutoClass` debajo):
```py
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification
>>> model = AutoModelForSequenceClassification.from_pretrained(model_name)
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
</pt>
<tf>
Usa [`TFAutoModelForSequenceClassification`] y [`AutoTokenizer`] para cargar un modelo preentrenado y un tokenizador asociado (más en un `TFAutoClass` debajo):
```py
>>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
>>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
</tf>
</frameworkcontent>
Después puedes especificar el modelo y el tokenizador en el [`pipeline`], y aplicar el `classifier` en tu texto objetivo:
```py
>>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
>>> classifier("Nous sommes trÚs heureux de vous présenter la bibliothÚque ð€ Transformers.")
[{'label': '5 stars', 'score': 0.7273}]
```
Si no pudieras encontrar el modelo para tu caso respectivo de uso, necesitarás ajustar un modelo preentrenado a tus datos. Mira nuestro [tutorial de fine-tuning](./training) para aprender cómo. Finalmente, después de que hayas ajustado tu modelo preentrenado, ¡por favor considera compartirlo (ve el tutorial [aquí](./model_sharing)) con la comunidad en el Model Hub para democratizar el NLP! 🤗
## AutoClass
<Youtube id="AhChOFRegn4"/>
Por debajo, las clases [`AutoModelForSequenceClassification`] y [`AutoTokenizer`] trabajan juntas para dar poder al [`pipeline`]. Una [AutoClass](./model_doc/auto) es un atajo que automáticamente recupera la arquitectura de un modelo preentrenado con su nombre o el path. Sólo necesitarás seleccionar el `AutoClass` apropiado para tu tarea y tu tokenizador asociado con [`AutoTokenizer`].
Regresemos a nuestro ejemplo y veamos cómo puedes usar el `AutoClass` para reproducir los resultados del [`pipeline`].
### AutoTokenizer
Un tokenizador es responsable de procesar el texto a un formato que sea entendible para el modelo. Primero, el tokenizador separará el texto en palabras llamadas *tokens*. Hay múltiples reglas que gobiernan el proceso de tokenización incluyendo el cómo separar una palabra y en qué nivel (aprende más sobre tokenización [aquí](./tokenizer_summary)). Lo más importante es recordar que necesitarás instanciar el tokenizador con el mismo nombre del modelo para asegurar que estás usando las mismas reglas de tokenización con las que el modelo fue preentrenado.
Carga un tokenizador con [`AutoTokenizer`]:
```py
>>> from transformers import AutoTokenizer
>>> nombre_del_modelo = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tokenizer = AutoTokenizer.from_pretrained(nombre_del_modelo)
```
Después, el tokenizador convierte los tokens a números para construir un tensor que servirá como input para el modelo. Esto es conocido como el *vocabulario* del modelo.
Pasa tu texto al tokenizador:
```py
>>> encoding = tokenizer("Estamos muy felices de mostrarte la biblioteca de 🤗 Transformers.")
>>> print(encoding)
{'input_ids': [101, 10602, 14000, 13653, 43353, 10107, 10102, 47201, 10218, 10106, 18283, 10102, 100, 58263, 119, 102],
'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```
El tokenizador devolverá un diccionario conteniendo:
* [input_ids](./glossary#input-ids): representaciones numéricas de los tokens.
* [attention_mask](./glossary#attention-mask): indica cuáles tokens deben ser atendidos.
Como con el [`pipeline`], el tokenizador aceptará una lista de inputs. Además, el tokenizador también puede rellenar (pad, en inglés) y truncar el texto para devolver un lote (batch, en inglés) de longitud uniforme:
<frameworkcontent>
<pt>
```py
>>> pt_batch = tokenizer(
... ["We are very happy to show you the ð€ Transformers library.", "We hope you don't hate it."],
... padding=True,
... truncation=True,
... max_length=512,
... return_tensors="pt",
... )
```
</pt>
<tf>
```py
>>> tf_batch = tokenizer(
... ["We are very happy to show you the ð€ Transformers library.", "We hope you don't hate it."],
... padding=True,
... truncation=True,
... max_length=512,
... return_tensors="tf",
... )
```
</tf>
</frameworkcontent>
Lee el tutorial de [preprocessing](./preprocessing) para más detalles acerca de la tokenización.
### AutoModel
<frameworkcontent>
<pt>
🤗 Transformers provee una forma simple y unificada de cargar tus instancias preentrenadas. Esto significa que puedes cargar un [`AutoModel`] como cargarías un [`AutoTokenizer`]. La única diferencia es seleccionar el [`AutoModel`] correcto para la tarea. Ya que estás clasificando texto, o secuencias, carga [`AutoModelForSequenceClassification`]:
```py
>>> from transformers import AutoModelForSequenceClassification
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)
```
<Tip>
Ve el [task summary](./task_summary) para revisar qué clase del [`AutoModel`] deberías usar para cada tarea.
</Tip>
Ahora puedes pasar tu lote (batch) preprocesado de inputs directamente al modelo. Solo tienes que desempacar el diccionario añadiendo `**`:
```py
>>> pt_outputs = pt_model(**pt_batch)
```
El modelo producirá las activaciones finales en el atributo `logits`. Aplica la función softmax a `logits` para obtener las probabilidades:
```py
>>> from torch import nn
>>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1)
>>> print(pt_predictions)
tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725],
[0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>)
```
</pt>
<tf>
🤗 Transformers provee una forma simple y unificada de cargar tus instancias preentrenadas. Esto significa que puedes cargar un [`TFAutoModel`] como cargarías un [`AutoTokenizer`]. La única diferencia es seleccionar el [`TFAutoModel`] correcto para la tarea. Ya que estás clasificando texto, o secuencias, carga [`TFAutoModelForSequenceClassification`]:
```py
>>> from transformers import TFAutoModelForSequenceClassification
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
```
<Tip>
Ve el [task summary](./task_summary) para revisar qué clase del [`AutoModel`]
deberías usar para cada tarea.
</Tip>
Ahora puedes pasar tu lote preprocesado de inputs directamente al modelo pasando las llaves del diccionario directamente a los tensores:
```py
>>> tf_outputs = tf_model(tf_batch)
```
El modelo producirá las activaciones finales en el atributo `logits`. Aplica la función softmax a `logits` para obtener las probabilidades:
```py
>>> import tensorflow as tf
>>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1)
>>> print(tf.math.round(tf_predictions * 10**4) / 10**4)
tf.Tensor(
[[0.0021 0.0018 0.0116 0.2121 0.7725]
[0.2084 0.1826 0.1969 0.1755 0.2365]], shape=(2, 5), dtype=float32)
```
</tf>
</frameworkcontent>
<Tip>
Todos los modelos de 🤗 Transformers (PyTorch o TensorFlow) producirán los tensores *antes* de la función de activación
final (como softmax) porque la función de activación final es comúnmente fusionada con la pérdida.
</Tip>
Los modelos son [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) o [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) estándares, así que podrás usarlos en tu training loop usual. Sin embargo, para facilitar las cosas, 🤗 Transformers provee una clase [`Trainer`] para PyTorch que añade funcionalidades para entrenamiento distribuido, precisión mixta, y más. Para TensorFlow, puedes usar el método `fit` desde [Keras](https://keras.io/). Consulta el [tutorial de entrenamiento](./training) para más detalles.
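Como referencia, este es un boceto mínimo de un paso de entrenamiento manual con el modelo de PyTorch de arriba (las etiquetas son hipotéticas, solo para ilustrar que el modelo se comporta como un `torch.nn.Module` normal):
```py
>>> import torch
>>> from torch.optim import AdamW

>>> optimizador = AdamW(pt_model.parameters(), lr=5e-5)
>>> etiquetas = torch.tensor([4, 0])  # etiquetas hipotéticas para las dos frases de `pt_batch`

>>> salidas = pt_model(**pt_batch, labels=etiquetas)  # al pasar `labels`, el modelo también devuelve la pérdida
>>> salidas.loss.backward()
>>> optimizador.step()
>>> optimizador.zero_grad()
```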
<Tip>
Los outputs del modelo de 🤗 Transformers son dataclasses especiales por lo que sus atributos pueden ser completados en un IDE.
Los outputs del modelo también se comportan como tuplas o diccionarios (e.g., puedes indexar con un entero, un slice o una cadena) en cuyo caso los atributos que son `None` son ignorados.
</Tip>
### Guarda un modelo
<frameworkcontent>
<pt>
Una vez que se haya hecho fine-tuning a tu modelo puedes guardarlo con tu tokenizador usando [`PreTrainedModel.save_pretrained`]:
```py
>>> pt_save_directory = "./pt_save_pretrained"
>>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT
>>> pt_model.save_pretrained(pt_save_directory)
```
Cuando quieras usar el modelo otra vez cárgalo con [`PreTrainedModel.from_pretrained`]:
```py
>>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained")
```
</pt>
<tf>
Una vez que se haya hecho fine-tuning a tu modelo puedes guardarlo con tu tokenizador usando [`TFPreTrainedModel.save_pretrained`]:
```py
>>> tf_save_directory = "./tf_save_pretrained"
>>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT
>>> tf_model.save_pretrained(tf_save_directory)
```
Cuando quieras usar el modelo otra vez cárgalo con [`TFPreTrainedModel.from_pretrained`]:
```py
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained")
```
</tf>
</frameworkcontent>
Una característica particularmente interesante de 🤗 Transformers es la habilidad de guardar el modelo y cargarlo como un modelo de PyTorch o TensorFlow. El parámetro `from_pt` o `from_tf` puede convertir el modelo de un framework al otro:
<frameworkcontent>
<pt>
```py
>>> from transformers import AutoModel
>>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory)
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)
```
</pt>
<tf>
```py
>>> from transformers import TFAutoModel
>>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory)
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True)
```
</tf>
</frameworkcontent>
| transformers/docs/source/es/quicktour.md/0 | {
"file_path": "transformers/docs/source/es/quicktour.md",
"repo_id": "transformers",
"token_count": 6360
} |
# docstyle-ignore
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets evaluate accelerate
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| transformers/docs/source/it/_config.py/0 | {
"file_path": "transformers/docs/source/it/_config.py",
"repo_id": "transformers",
"token_count": 190
} |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Modelli multilingue per l'inferenza
[[open-in-colab]]
Ci sono diversi modelli multilingue in 🤗 Transformers, e il loro utilizzo per l'inferenza differisce da quello dei modelli monolingua. Non *tutti* gli utilizzi dei modelli multilingue sono però diversi. Alcuni modelli, come [google-bert/bert-base-multilingual-uncased](https://huggingface.co/google-bert/bert-base-multilingual-uncased), possono essere usati come un modello monolingua. Questa guida ti mostrerà come utilizzare modelli multilingue che utilizzano un modo diverso per fare l'inferenza.
## XLM
XLM ha dieci diversi checkpoint, di cui solo uno è monolingua. I nove checkpoint rimanenti possono essere suddivisi in due categorie: i checkpoint che utilizzano i language embeddings e quelli che non li utilizzano.
### XLM con language embeddings
I seguenti modelli XLM utilizzano gli embeddings linguistici per specificare la lingua utilizzata per l'inferenza:
- `FacebookAI/xlm-mlm-ende-1024` (Modellazione mascherata del linguaggio (Masked language modeling, in inglese), Inglese-Tedesco)
- `FacebookAI/xlm-mlm-enfr-1024` (Modellazione mascherata del linguaggio, Inglese-Francese)
- `FacebookAI/xlm-mlm-enro-1024` (Modellazione mascherata del linguaggio, Inglese-Rumeno)
- `FacebookAI/xlm-mlm-xnli15-1024` (Modellazione mascherata del linguaggio, lingue XNLI)
- `FacebookAI/xlm-mlm-tlm-xnli15-1024` (Modellazione mascherata del linguaggio + traduzione, lingue XNLI)
- `FacebookAI/xlm-clm-enfr-1024` (Modellazione causale del linguaggio, Inglese-Francese)
- `FacebookAI/xlm-clm-ende-1024` (Modellazione causale del linguaggio, Inglese-Tedesco)
Gli embeddings linguistici sono rappresentati come un tensore delle stesse dimensioni dell' `input_ids` passato al modello. I valori in questi tensori dipendono dal linguaggio usato e sono identificati dagli attributi `lang2id` e `id2lang` del tokenizer.
In questo esempio, carica il checkpoint `FacebookAI/xlm-clm-enfr-1024` (Modellazione causale del linguaggio, Inglese-Francese):
```py
>>> import torch
>>> from transformers import XLMTokenizer, XLMWithLMHeadModel
>>> tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-clm-enfr-1024")
>>> model = XLMWithLMHeadModel.from_pretrained("FacebookAI/xlm-clm-enfr-1024")
```
L'attributo `lang2id` del tokenizer mostra il linguaggio del modello e il suo ids:
```py
>>> print(tokenizer.lang2id)
{'en': 0, 'fr': 1}
```
Poi, crea un esempio di input:
```py
>>> input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")]) # batch size of 1
```
Imposta l'id del linguaggio a `"en"` e usalo per definire il language embedding. Il language embedding è un tensore riempito con `0` perché questo è il language id per l'inglese. Questo tensore dovrebbe avere la stessa dimensione di `input_ids`.
```py
>>> language_id = tokenizer.lang2id["en"] # 0
>>> langs = torch.tensor([language_id] * input_ids.shape[1]) # torch.tensor([0, 0, 0, ..., 0])
>>> # We reshape it to be of size (batch_size, sequence_length)
>>> langs = langs.view(1, -1) # is now of shape [1, sequence_length] (we have a batch size of 1)
```
Adesso puoi inserire `input_ids` e language embedding nel modello:
```py
>>> outputs = model(input_ids, langs=langs)
```
Lo script [run_generation.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-generation/run_generation.py) può generare testo tramite i language embeddings usando i checkpoints `xlm-clm`.
### XLM senza language embeddings
I seguenti modelli XLM non richiedono l'utilizzo dei language embeddings per fare inferenza:
- `FacebookAI/xlm-mlm-17-1280` (Modellazione mascherata del linguaggio, 17 lingue)
- `FacebookAI/xlm-mlm-100-1280` (Modellazione mascherata del linguaggio, 100 lingue)
Questi modelli sono utilizzati per rappresentazioni generiche di frasi, a differenza dei precedenti checkpoint XLM.
## BERT
Il seguente modello BERT può essere usato per compiti multilingue:
- `google-bert/bert-base-multilingual-uncased` (Modellazione mascherata del linguaggio + Previsione della prossima frase, 102 lingue)
- `google-bert/bert-base-multilingual-cased` (Modellazione mascherata del linguaggio + Previsione della prossima frase, 104 lingue)
Questi modelli non richiedono language embeddings per fare inferenza. Riescono ad identificare il linguaggio dal contesto e inferire di conseguenza.
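Ad esempio, uno schizzo indicativo con la pipeline di fill-mask (la frase di esempio è ipotetica):
```py
>>> from transformers import pipeline

>>> riempi_maschera = pipeline("fill-mask", model="google-bert/bert-base-multilingual-cased")
>>> riempi_maschera("Parigi è la [MASK] della Francia.")  # doctest: +IGNORE_RESULT
```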
## XLM-RoBERTa
Il seguente modello XLM-RoBERTa può essere usato per compiti multilingue:
- `FacebookAI/xlm-roberta-base` (Modellazione mascherata del linguaggio, 100 lingue)
- `FacebookAI/xlm-roberta-large` (Modellazione mascherata del linguaggio, 100 lingue)
XLM-RoBERTa è stato addestrato su 2.5TB di dati CommonCrawl appena creati e puliti in 100 lingue. Offre notevoli vantaggi rispetto ai modelli multilingue rilasciati in precedenza, come mBERT o XLM, in compiti come la classificazione, l'etichettatura delle sequenze e la risposta alle domande.
## M2M100
Il seguente modello M2M100 può essere usato per compiti multilingue:
- `facebook/m2m100_418M` (Traduzione)
- `facebook/m2m100_1.2B` (Traduzione)
In questo esempio, carica il checkpoint `facebook/m2m100_418M` per tradurre dal cinese all'inglese. Puoi impostare la lingua di partenza nel tokenizer:
```py
>>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
>>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger."
>>> chinese_text = "不要插手巫師的事務, 因為他們是微妙的, 很快就會發怒."
>>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="zh")
>>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
```
Applica il tokenizer al testo:
```py
>>> encoded_zh = tokenizer(chinese_text, return_tensors="pt")
```
M2M100 forza l'id della lingua obiettivo come primo token generato per tradurre nella lingua obiettivo. Imposta il parametro `forced_bos_token_id` a `en` nel metodo `generate` per tradurre in inglese:
```py
>>> generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en"))
>>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
'Do not interfere with the matters of the witches, because they are delicate and will soon be angry.'
```
## MBart
Il seguente modello MBart può essere usato per compiti multilingue:
- `facebook/mbart-large-50-one-to-many-mmt` (Traduzione automatica multilingue uno-a-molti, 50 lingue)
- `facebook/mbart-large-50-many-to-many-mmt` (Traduzione automatica multilingue molti-a-molti, 50 lingue)
- `facebook/mbart-large-50-many-to-one-mmt` (Traduzione automatica multilingue molti-a-uno, 50 lingue)
- `facebook/mbart-large-50` (Traduzione multilingue, 50 lingue)
- `facebook/mbart-large-cc25`
In questo esempio, carica il checkpoint `facebook/mbart-large-50-many-to-many-mmt` per tradurre dal finlandese all'inglese. Puoi impostare la lingua di partenza nel tokenizer:
```py
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
>>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger."
>>> fi_text = "Älä sekaannu velhojen asioihin, sillä ne ovat hienovaraisia ja nopeasti vihaisia."
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-many-mmt", src_lang="fi_FI")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
```
Applica il tokenizer sul testo:
```py
>>> encoded_en = tokenizer(en_text, return_tensors="pt")
```
MBart forza l'id della lingua obiettivo come primo token generato per tradurre nella lingua obiettivo. Imposta il parametro `forced_bos_token_id` a `en` nel metodo `generate` per tradurre in inglese:
```py
>>> generated_tokens = model.generate(**encoded_en, forced_bos_token_id=tokenizer.lang_code_to_id("en_XX"))
>>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
"Don't interfere with the wizard's affairs, because they are subtle, will soon get angry."
```
Se stai usando il checkpoint `facebook/mbart-large-50-many-to-one-mmt`, non hai bisogno di forzare l'id della lingua obiettivo come primo token generato, altrimenti l'uso è lo stesso.
| transformers/docs/source/it/multilingual.md/0 | {
"file_path": "transformers/docs/source/it/multilingual.md",
"repo_id": "transformers",
"token_count": 3202
} |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Hyperparameter Search using Trainer API
ð€ Transformersã¯ãð€ Transformersã¢ãã«ã®ãã¬ãŒãã³ã°ãæé©åãã[`Trainer`]ã¯ã©ã¹ãæäŸããç¬èªã®ãã¬ãŒãã³ã°ã«ãŒããæåã§èšè¿°ããã«ãã¬ãŒãã³ã°ãéå§ããã®ãç°¡åã«ãªããŸãã[`Trainer`]ã¯ãã€ããŒãã©ã¡ãŒã¿ãŒæ€çŽ¢ã®APIãæäŸããŠããŸãããã®ããã¥ã¡ã³ãã§ã¯ããããäŸç€ºããŸãã
## Hyperparameter Search backend
[`Trainer`]ã¯çŸåšã4ã€ã®ãã€ããŒãã©ã¡ãŒã¿ãŒæ€çŽ¢ããã¯ãšã³ãããµããŒãããŠããŸãïŒ
[optuna](https://optuna.org/)ã[sigopt](https://sigopt.com/)ã[raytune](https://docs.ray.io/en/latest/tune/index.html)ãããã³[wandb](https://wandb.ai/site/sweeps)ã
ãããã䜿çšããåã«ããã€ããŒãã©ã¡ãŒã¿ãŒæ€çŽ¢ããã¯ãšã³ããã€ã³ã¹ããŒã«ããå¿
èŠããããŸãã
```bash
pip install optuna/sigopt/wandb/ray[tune]
```
## How to enable Hyperparameter search in example
ãã€ããŒãã©ã¡ãŒã¿ã®æ€çŽ¢ã¹ããŒã¹ãå®çŸ©ããç°ãªãããã¯ãšã³ãã«ã¯ç°ãªããã©ãŒããããå¿
èŠã§ãã
Sigoptã®å Žåãsigopt [object_parameter](https://docs.sigopt.com/ai-module-api-references/api_reference/objects/object_parameter) ãåç
§ããŠãã ãããããã¯ä»¥äžã®ãããªãã®ã§ãïŒ
```py
>>> def sigopt_hp_space(trial):
... return [
... {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double"},
... {
... "categorical_values": ["16", "32", "64", "128"],
... "name": "per_device_train_batch_size",
... "type": "categorical",
... },
... ]
```
Optunaã«é¢ããŠã¯ã[object_parameter](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/002_configurations.html#sphx-glr-tutorial-10-key-features-002-configurations-py)ãã芧ãã ããã以äžã®ããã«ãªããŸãïŒ
```py
>>> def optuna_hp_space(trial):
... return {
... "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
... "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [16, 32, 64, 128]),
... }
```
Optunaã¯ãå€ç®çã®ãã€ããŒãã©ã¡ãŒã¿æé©åïŒHPOïŒãæäŸããŠããŸãã `hyperparameter_search` ã§ `direction` ãæž¡ããè€æ°ã®ç®ç颿°å€ãè¿ãããã®ç¬èªã® `compute_objective` ãå®çŸ©ããããšãã§ããŸãã Pareto FrontïŒ`List[BestRun]`ïŒã¯ `hyperparameter_search` ã§è¿ããã[test_trainer](https://github.com/huggingface/transformers/blob/main/tests/trainer/test_trainer.py) ã®ãã¹ãã±ãŒã¹ `TrainerHyperParameterMultiObjectOptunaIntegrationTest` ãåç
§ããå¿
èŠããããŸããããã¯ä»¥äžã®ããã«ãªããŸãã
```py
>>> best_trials = trainer.hyperparameter_search(
... direction=["minimize", "maximize"],
... backend="optuna",
... hp_space=optuna_hp_space,
... n_trials=20,
... compute_objective=compute_objective,
... )
```
Ray Tuneã«é¢ããŠã[object_parameter](https://docs.ray.io/en/latest/tune/api/search_space.html)ãåç
§ããŠãã ããã以äžã®ããã«ãªããŸãïŒ
```py
>>> def ray_hp_space(trial):
... return {
... "learning_rate": tune.loguniform(1e-6, 1e-4),
... "per_device_train_batch_size": tune.choice([16, 32, 64, 128]),
... }
```
Wandbã«ã€ããŠã¯ã[object_parameter](https://docs.wandb.ai/guides/sweeps/configuration)ãã芧ãã ãããããã¯ä»¥äžã®ããã«ãªããŸãïŒ
```py
>>> def wandb_hp_space(trial):
... return {
... "method": "random",
... "metric": {"name": "objective", "goal": "minimize"},
... "parameters": {
... "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
... "per_device_train_batch_size": {"values": [16, 32, 64, 128]},
... },
... }
```
`model_init` 颿°ãå®çŸ©ããããã [`Trainer`] ã«æž¡ãäŸã瀺ããŸãïŒ
```py
>>> def model_init(trial):
... return AutoModelForSequenceClassification.from_pretrained(
... model_args.model_name_or_path,
... from_tf=bool(".ckpt" in model_args.model_name_or_path),
... config=config,
... cache_dir=model_args.cache_dir,
... revision=model_args.model_revision,
... token=True if model_args.use_auth_token else None,
... )
```
[`Trainer`] ã `model_init` 颿°ããã¬ãŒãã³ã°åŒæ°ããã¬ãŒãã³ã°ããŒã¿ã»ããããã¹ãããŒã¿ã»ãããããã³è©äŸ¡é¢æ°ãšå
±ã«äœæããŠãã ãã:
```py
>>> trainer = Trainer(
... model=None,
... args=training_args,
... train_dataset=small_train_dataset,
... eval_dataset=small_eval_dataset,
... compute_metrics=compute_metrics,
... processing_class=tokenizer,
... model_init=model_init,
... data_collator=data_collator,
... )
```
ãã€ããŒãã©ã¡ãŒã¿ãŒã®æ¢çŽ¢ãåŒã³åºããæè¯ã®ãã©ã€ã¢ã« ãã©ã¡ãŒã¿ãŒãååŸããŸããããã¯ãšã³ã㯠`"optuna"` / `"sigopt"` / `"wandb"` / `"ray"` ã§ããå¯èœæ§ããããŸããæ¹å㯠`"minimize"` ãŸã㯠`"maximize"` ã§ãããç®æšããã倧ãããããå°ãããããã瀺ããŸãã
`compute_objective` 颿°ãç¬èªã«å®çŸ©ããããšãã§ããŸããå®çŸ©ãããŠããªãå Žåãããã©ã«ãã® `compute_objective` ãåŒã³åºãããF1ãªã©ã®è©äŸ¡ã¡ããªãã¯ã®åèšãç®æšå€ãšããŠè¿ãããŸãã
```py
>>> best_trial = trainer.hyperparameter_search(
... direction="maximize",
... backend="optuna",
... hp_space=optuna_hp_space,
... n_trials=20,
... compute_objective=compute_objective,
... )
```
## Hyperparameter search For DDP finetune
çŸåšãDDPïŒDistributed Data ParallelïŒã®ããã®ãã€ããŒãã©ã¡ãŒã¿ãŒæ€çŽ¢ã¯ãOptuna ãš SigOpt ã«å¯ŸããŠæå¹ã«ãªã£ãŠããŸããã©ã³ã¯ãŒãããã»ã¹ã®ã¿ãæ€çŽ¢ãã©ã€ã¢ã«ãçæããä»ã®ã©ã³ã¯ã«åŒæ°ãæž¡ããŸãã
| transformers/docs/source/ja/hpo_train.md/0 | {
"file_path": "transformers/docs/source/ja/hpo_train.md",
"repo_id": "transformers",
"token_count": 2838
} |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# ALBERT
<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=albert">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-albert-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/albert-base-v2">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>
## æŠèŠ
ALBERTã¢ãã«ã¯ãã[ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942)ããšããè«æã§Zhenzhong LanãMingda ChenãSebastian GoodmanãKevin GimpelãPiyush SharmaãRadu Soricutã«ãã£ãŠææ¡ãããŸãããBERTã®ã¡ã¢ãªæ¶è²»ãæžãããã¬ãŒãã³ã°ãé«éåããããã®ãã©ã¡ãŒã¿åæžæè¡ã2ã€ç€ºããŠããŸãïŒ
- åã蟌ã¿è¡åã2ã€ã®å°ããªè¡åã«åå²ããã
- ã°ã«ãŒãéã§åå²ãããç¹°ãè¿ãå±€ã䜿çšããã
è«æã®èŠæšã¯ä»¥äžã®éãã§ãïŒ
*èªç¶èšèªè¡šçŸã®äºååŠç¿æã«ã¢ãã«ã®ãµã€ãºãå¢ãããšãäžæµã¿ã¹ã¯ã®ããã©ãŒãã³ã¹ãåäžããããšããã°ãã°ãããŸããããããããæç¹ã§ãããªãã¢ãã«ã®å¢å€§ã¯ãGPU/TPUã®ã¡ã¢ãªå¶éãé·ãèšç·Žæéãäºæãã¬ã¢ãã«ã®å£åãšãã£ãåé¡ã®ããã«å°é£ã«ãªããŸãããããã®åé¡ã«å¯ŸåŠããããã«ãæã
ã¯BERTã®ã¡ã¢ãªæ¶è²»ãäœæžããèšç·Žé床ãé«ããããã®2ã€ã®ãã©ã¡ãŒã¿åæžæè¡ãææ¡ããŸããå
æ¬çãªå®èšŒç蚌æ ã¯ãæã
ã®ææ¡æ¹æ³ãå
ã®BERTã«æ¯ã¹ãŠã¯ããã«ããã¹ã±ãŒã«ããã¢ãã«ãçã¿åºãããšã瀺ããŠããŸãããŸããæéã®äžè²«æ§ãã¢ããªã³ã°ã«çŠç¹ãåœãŠãèªå·±æåž«ããæå€±ã䜿çšããè€æ°ã®æãå«ãŸããäžæµã¿ã¹ã¯ã«äžè²«ããŠå©ããšãªãããšã瀺ããŸãããã®çµæãæã
ã®æè¯ã®ã¢ãã«ã¯ãBERT-largeã«æ¯ã¹ãŠãã©ã¡ãŒã¿ãå°ãªãã«ãããããããGLUEãRACEãSQuADãã³ãããŒã¯ã§æ°ããªæå
端ã®çµæã確ç«ããŸãã*
ãã®ã¢ãã«ã¯[lysandre](https://huggingface.co/lysandre)ã«ããæäŸãããŸããããã®ã¢ãã«ã®jaxããŒãžã§ã³ã¯[kamalkraj](https://huggingface.co/kamalkraj)ã«ããæäŸãããŸããããªãªãžãã«ã®ã³ãŒãã¯[ãã¡ã](https://github.com/google-research/ALBERT)ã§èŠãããšãã§ããŸãã
## 䜿çšäžã®ãã³ã
- ALBERTã¯çµ¶å¯Ÿäœçœ®åã蟌ã¿ã䜿çšããã¢ãã«ãªã®ã§ãéåžžãå
¥åãå·ŠåŽã§ã¯ãªãå³åŽã«ããã£ã³ã°ããããšãæšå¥šãããŸãã
- ALBERTã¯ç¹°ãè¿ãå±€ã䜿çšããããã¡ã¢ãªäœ¿çšéã¯å°ãããªããŸãããåãæ°ã®ïŒç¹°ãè¿ãïŒå±€ãå埩ããªããã°ãªããªããããé ãå±€ã®æ°ãåãã§ããã°BERTã®ãããªã¢ãŒããã¯ãã£ãšåæ§ã®èšç®ã³ã¹ããããããŸãã
- åã蟌ã¿ãµã€ãºEã¯é ããµã€ãºHãšç°ãªããŸãããããã¯åã蟌ã¿ãæèã«äŸåããªãïŒäžã€ã®åã蟌ã¿ãã¯ãã«ãäžã€ã®ããŒã¯ã³ã衚ãïŒã®ã«å¯Ÿããé ãç¶æ
ã¯æèã«äŸåããïŒ1ã€ã®é ãç¶æ
ãããŒã¯ã³ç³»åã衚ãïŒãããH >> Eãšããããšãããè«ççã§ãããŸããåã蟌ã¿è¡åã®ãµã€ãºã¯V x Eãšå€§ããã§ãïŒVã¯èªåœãµã€ãºïŒãE < Hã§ããã°ããã©ã¡ãŒã¿ã¯å°ãªããªããŸãã
- å±€ã¯ãã©ã¡ãŒã¿ãå
±æããã°ã«ãŒãã«åå²ãããŠããŸãïŒã¡ã¢ãªç¯çŽã®ããïŒã次æäºæž¬ïŒNSP: Next Sentence PredictionïŒã¯æã®é åºäºæž¬ã«çœ®ãæããããŸãïŒå
¥åã§ã¯ã2ã€ã®æAãšBïŒãããã¯é£ç¶ããŠããïŒããããAã«ç¶ããŠBãäžããããBã«ç¶ããŠAãäžããŸããã¢ãã«ã¯ããããå
¥ãæ¿ãã£ãŠãããã©ãããäºæž¬ããå¿
èŠããããŸãã
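以下は、事前学習済みチェックポイントを読み込んで特徴量を取り出す簡単な例です(チェックポイント名 `albert/albert-base-v2` は説明用の一例です)。
```python
>>> from transformers import AlbertTokenizer, AlbertModel

>>> tokenizer = AlbertTokenizer.from_pretrained("albert/albert-base-v2")
>>> model = AlbertModel.from_pretrained("albert/albert-base-v2")

>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
```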
## åèè³æ
- [ããã¹ãåé¡ã¿ã¹ã¯ã¬ã€ã](../tasks/sequence_classification)
- [ããŒã¯ã³åé¡ã¿ã¹ã¯ã¬ã€ã](../tasks/token_classification)
- [質åå¿çã¿ã¹ã¯ã¬ã€ã](../tasks/question_answering)
- [ãã¹ã¯ãããèšèªã¢ãã«ã¿ã¹ã¯ã¬ã€ã](../tasks/masked_language_modeling)
- [å€è¢éžæã¿ã¹ã¯ã¬ã€ã](../tasks/multiple_choice)
## AlbertConfig
[[autodoc]] AlbertConfig
## AlbertTokenizer
[[autodoc]] AlbertTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## AlbertTokenizerFast
[[autodoc]] AlbertTokenizerFast
## Albert specific outputs
[[autodoc]] models.albert.modeling_albert.AlbertForPreTrainingOutput
[[autodoc]] models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput
<frameworkcontent>
<pt>
## AlbertModel
[[autodoc]] AlbertModel
- forward
## AlbertForPreTraining
[[autodoc]] AlbertForPreTraining
- forward
## AlbertForMaskedLM
[[autodoc]] AlbertForMaskedLM
- forward
## AlbertForSequenceClassification
[[autodoc]] AlbertForSequenceClassification
- forward
## AlbertForMultipleChoice
[[autodoc]] AlbertForMultipleChoice
## AlbertForTokenClassification
[[autodoc]] AlbertForTokenClassification
- forward
## AlbertForQuestionAnswering
[[autodoc]] AlbertForQuestionAnswering
- forward
</pt>
<tf>
## TFAlbertModel
[[autodoc]] TFAlbertModel
- call
## TFAlbertForPreTraining
[[autodoc]] TFAlbertForPreTraining
- call
## TFAlbertForMaskedLM
[[autodoc]] TFAlbertForMaskedLM
- call
## TFAlbertForSequenceClassification
[[autodoc]] TFAlbertForSequenceClassification
- call
## TFAlbertForMultipleChoice
[[autodoc]] TFAlbertForMultipleChoice
- call
## TFAlbertForTokenClassification
[[autodoc]] TFAlbertForTokenClassification
- call
## TFAlbertForQuestionAnswering
[[autodoc]] TFAlbertForQuestionAnswering
- call
</tf>
<jax>
## FlaxAlbertModel
[[autodoc]] FlaxAlbertModel
- __call__
## FlaxAlbertForPreTraining
[[autodoc]] FlaxAlbertForPreTraining
- __call__
## FlaxAlbertForMaskedLM
[[autodoc]] FlaxAlbertForMaskedLM
- __call__
## FlaxAlbertForSequenceClassification
[[autodoc]] FlaxAlbertForSequenceClassification
- __call__
## FlaxAlbertForMultipleChoice
[[autodoc]] FlaxAlbertForMultipleChoice
- __call__
## FlaxAlbertForTokenClassification
[[autodoc]] FlaxAlbertForTokenClassification
- __call__
## FlaxAlbertForQuestionAnswering
[[autodoc]] FlaxAlbertForQuestionAnswering
- __call__
</jax>
</frameworkcontent>
| transformers/docs/source/ja/model_doc/albert.md/0 | {
"file_path": "transformers/docs/source/ja/model_doc/albert.md",
"repo_id": "transformers",
"token_count": 2960
} |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# BigBirdPegasus
## Overview
BigBird ã¢ãã«ã¯ã[Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) ã§ææ¡ãããŸããã
ã¶ããŒã«ããã³ãžã«ãšã°ã«ã¬ãã·ã¥ãã°ã«ãšããã€ãã¯ããŒã«ã»ã¢ãŽã£ããŽã¡ãšãšã€ã³ãºãªãŒããžã§ã·ã¥ã¢ãšã¢ã«ãã«ãã£ãã¯ãªã¹ãšãªã³ã¿ãã³ã
ãµã³ãã£ã¢ãŽãšãã¡ã ããã£ãªãããšã©ãã©ãã¢ãã«ãŒããšã¯ã³ãããŒãã¡ã³ãšã€ã³ããªãŒãªã©ã BigBird ã¯æ³šç®åºŠãäœã
BERT ãªã©ã® Transformer ããŒã¹ã®ã¢ãã«ãããã«é·ãã·ãŒã±ã³ã¹ã«æ¡åŒµãããTransformer ããŒã¹ã®ã¢ãã«ããŸã°ãã«å ããŠ
ã¢ãã³ã·ã§ã³ãšåæ§ã«ãBigBird ã¯å
¥åã·ãŒã±ã³ã¹ã«ã©ã³ãã ã¢ãã³ã·ã§ã³ã ãã§ãªãã°ããŒãã« ã¢ãã³ã·ã§ã³ãé©çšããŸããçè«çã«ã¯ã
ãŸã°ãã§å
šäœçã§ã©ã³ãã ãªæ³šæãé©çšãããšãå®å
šãªæ³šæã«è¿ã¥ãããšã瀺ãããŠããŸããã
é·ãã·ãŒã±ã³ã¹ã§ã¯èšç®å¹çã倧å¹
ã«åäžããŸããããé·ãã³ã³ããã¹ããåŠçã§ããæ©èœã®çµæãšããŠã
BigBird ã¯ã質åå¿çã
BERT ãŸã㯠RoBERTa ãšæ¯èŒããèŠçŽã
è«æã®èŠçŽã¯æ¬¡ã®ãšããã§ãã
*BERT ãªã©ã®ãã©ã³ã¹ãã©ãŒããŒããŒã¹ã®ã¢ãã«ã¯ãNLP ã§æãæåããæ·±å±€åŠç¿ã¢ãã«ã® 1 ã€ã§ãã
æ®å¿µãªããããããã®äžæ žçãªå¶éã® 1 ã€ã¯ãã·ãŒã±ã³ã¹ã«å¯Ÿããäºæ¬¡äŸåæ§ (äž»ã«ã¡ã¢ãªã«é¢ãã) ã§ãã
å®å
šãªæ³šæã¡ã«ããºã ã«ããé·ãã§ããããã解決ããããã«ãBigBird ã¯ããŸã°ããªæ³šæã¡ã«ããºã ãææ¡ããŸãã
ãã®äºæ¬¡äŸåé¢ä¿ãç·åœ¢ã«åæžããŸãã BigBird ãã·ãŒã±ã³ã¹é¢æ°ã®æ±çšè¿äŒŒåšã§ããããšã瀺ããŸãã
ãã¥ãŒãªã³ã°ã¯å®å
šã§ãããããäºæ¬¡å®å
šæ³šæã¢ãã«ã®ãããã®ç¹æ§ãä¿åãããŸããéäžãç§ãã¡ã®
çè«åæã«ãããO(1) åã®ã°ããŒãã« ããŒã¯ã³ (CLS ãªã©) ãæã€å©ç¹ã®äžéšãæããã«ãªãã
ã¹ããŒã¹æ³šæã¡ã«ããºã ã®äžéšãšããŠã®ã·ãŒã±ã³ã¹ãææ¡ãããã¹ããŒã¹ ã¢ãã³ã·ã§ã³ã¯ã次ã®é·ãã®ã·ãŒã±ã³ã¹ãåŠçã§ããŸãã
åæ§ã®ããŒããŠã§ã¢ã䜿çšããŠä»¥åã«å¯èœã§ãã£ããã®ã® 8 åãããé·ãã³ã³ããã¹ããåŠçã§ããæ©èœã®çµæãšããŠã
BigBird ã¯ã質åå¿çãèŠçŽãªã©ã®ããŸããŸãª NLP ã¿ã¹ã¯ã®ããã©ãŒãã³ã¹ã倧å¹
ã«åäžãããŸããç§éã
ã²ããã¯ã¹ããŒã¿ãžã®æ°ããã¢ããªã±ãŒã·ã§ã³ãææ¡ããŸãã*
## Usage tips
- BigBird ã®æ³šæãã©ã®ããã«æ©èœãããã«ã€ããŠã®è©³çްãªèª¬æã«ã€ããŠã¯ã[ãã®ããã°æçš¿](https://huggingface.co/blog/big-bird) ãåç
§ããŠãã ããã
- BigBird ã«ã¯ã**original_full** ãš **block_sparse** ã® 2 ã€ã®å®è£
ãä»å±ããŠããŸããã·ãŒã±ã³ã¹é·ã 1024 æªæºã®å Žåãæ¬¡ã䜿çšããŸãã
**block_sparse** ã䜿çšããŠãã¡ãªããããªãããã**original_full** ã䜿çšããããšããå§ãããŸãã
- ã³ãŒãã¯çŸåšã3 ãããã¯ãš 2 ã°ããŒãã« ãããã¯ã®ãŠã£ã³ã㊠ãµã€ãºã䜿çšããŠããŸãã
- ã·ãŒã±ã³ã¹ã®é·ãã¯ããã㯠ãµã€ãºã§å²ãåããå¿
èŠããããŸãã
- çŸåšã®å®è£
ã§ã¯ **ITC** ã®ã¿ããµããŒããããŠããŸãã
- çŸåšã®å®è£
ã§ã¯ **num_random_blocks = 0** ã¯ãµããŒããããŠããŸããã
- BigBirdPegasus 㯠[PegasusTokenizer](https://github.com/huggingface/transformers/blob/main/src/transformers/models/pegasus/tokenization_pegasus.py) ã䜿çšããŸãã
- BigBird ã¯çµ¶å¯Ÿäœçœ®åã蟌ã¿ãåããã¢ãã«ã§ãããããéåžžã¯å
¥åãå³åŽã«ããã£ã³ã°ããããšããå§ãããŸãã
å·Šã
å
ã®ã³ãŒã㯠[ãã¡ã](https://github.com/google-research/bigbird) ã«ãããŸãã
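以下は、`from_pretrained` でアテンション関連の設定を上書きする簡単な例です(チェックポイント名と設定値は説明用の一例です)。
```python
>>> from transformers import BigBirdPegasusForConditionalGeneration

>>> # 短い系列を扱う場合は full attention に切り替える(一例)
>>> model = BigBirdPegasusForConditionalGeneration.from_pretrained(
...     "google/bigbird-pegasus-large-arxiv", attention_type="original_full"
... )

>>> # block_sparse を使う場合は block_size や num_random_blocks を調整できる(値は一例)
>>> model = BigBirdPegasusForConditionalGeneration.from_pretrained(
...     "google/bigbird-pegasus-large-arxiv", attention_type="block_sparse", block_size=16, num_random_blocks=2
... )
```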
## ããã¥ã¡ã³ã ãªãœãŒã¹
- [ããã¹ãåé¡ã¿ã¹ã¯ã¬ã€ã](../tasks/sequence_classification)
- [質ååçã¿ã¹ã¯ ã¬ã€ã](../tasks/question_answering)
- [å æèšèªã¢ããªã³ã° ã¿ã¹ã¯ ã¬ã€ã](../tasks/language_modeling)
- [翻蚳ã¿ã¹ã¯ã¬ã€ã](../tasks/translation)
- [èŠçŽã¿ã¹ã¯ã¬ã€ã](../tasks/summarization)
## BigBirdPegasusConfig
[[autodoc]] BigBirdPegasusConfig
- all
## BigBirdPegasusModel
[[autodoc]] BigBirdPegasusModel
- forward
## BigBirdPegasusForConditionalGeneration
[[autodoc]] BigBirdPegasusForConditionalGeneration
- forward
## BigBirdPegasusForSequenceClassification
[[autodoc]] BigBirdPegasusForSequenceClassification
- forward
## BigBirdPegasusForQuestionAnswering
[[autodoc]] BigBirdPegasusForQuestionAnswering
- forward
## BigBirdPegasusForCausalLM
[[autodoc]] BigBirdPegasusForCausalLM
- forward
| transformers/docs/source/ja/model_doc/bigbird_pegasus.md/0 | {
"file_path": "transformers/docs/source/ja/model_doc/bigbird_pegasus.md",
"repo_id": "transformers",
"token_count": 2264
} |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# CLIP
## Overview
CLIP ã¢ãã«ã¯ãAlec RadfordãJong Wook KimãChris HallacyãAditya RameshãGabriel Goh Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) ã§ææ¡ãããŸããã
ãµã³ãã£ãã»ã¢ã¬ã«ã¯ã«ãã®ãªãã·ã¥ã»ãµã¹ããªãŒãã¢ãã³ãã»ã¢ã¹ã±ã«ããã¡ã©ã»ãã·ã¥ãã³ããžã£ãã¯ã»ã¯ã©ãŒã¯ãã°ã¬ããã§ã³ã»ã¯ã«ãŒã¬ãŒãã€ãªã€ã»ãµãã±ãŽã¡ãŒãã¯ãªãã
(Contrastive Language-Image Pre-Training) ã¯ãããŸããŸãª (ç»åãããã¹ã) ãã¢ã§ãã¬ãŒãã³ã°ããããã¥ãŒã©ã« ãããã¯ãŒã¯ã§ããããã
çŽæ¥æé©åããããšãªããäžããããç»åããæãé¢é£æ§ã®é«ãããã¹ã ã¹ãããããäºæž¬ããããã«èªç¶èšèªã§æç€ºãããŸãã
GPT-2 ããã³ 3 ã®ãŒãã·ã§ããæ©èœãšåæ§ã«ãã¿ã¹ã¯ã«å¯ŸããŠã
è«æã®èŠçŽã¯æ¬¡ã®ãšããã§ãã
*æå
端ã®ã³ã³ãã¥ãŒã¿ãŒ ããžã§ã³ ã·ã¹ãã ã¯ããããããå®ãããããªããžã§ã¯ã ã«ããŽãªã®åºå®ã»ãããäºæž¬ããããã«ãã¬ãŒãã³ã°ãããŠããŸãããã
å¶éããã圢åŒã®ç£èŠã§ã¯ãæå®ããããã«è¿œå ã®ã©ãã«ä»ãããŒã¿ãå¿
èŠãšãªããããäžè¬æ§ãšäœ¿ãããããå¶éãããŸãã
ãã®ä»ã®èŠèŠçãªã³ã³ã»ãããç»åã«é¢ããçã®ããã¹ãããçŽæ¥åŠç¿ããããšã¯ã
ããåºç¯ãªç£ç£æºãã©ã®ãã£ãã·ã§ã³ã衚瀺ãããããäºæž¬ãããšããåçŽãªäºåãã¬ãŒãã³ã° ã¿ã¹ã¯ãæå¹ã§ããããšã瀺ããŸãã
400 ã®ããŒã¿ã»ããã§ SOTA ç»å衚çŸãæåããåŠç¿ããããã®å¹ççãã€ã¹ã±ãŒã©ãã«ãªæ¹æ³ã¯ã©ã®ç»åã§ãã
ã€ã³ã¿ãŒãããããåéãããæ°çŸäžã®ïŒç»åãããã¹ãïŒãã¢ãäºåãã¬ãŒãã³ã°åŸãèªç¶èšèªã䜿çšããŠåç
§ããŸãã
èŠèŠçãªæŠå¿µãåŠç¿ãïŒãŸãã¯æ°ããæŠå¿µã説æãïŒãäžæµã®ã¿ã¹ã¯ãžã®ã¢ãã«ã®ãŒãã·ã§ãã転éãå¯èœã«ããŸããç§ãã¡ã¯å匷ããŸã
30 ãè¶
ããããŸããŸãªæ¢åã®ã³ã³ãã¥ãŒã¿ãŒ ããžã§ã³ ããŒã¿ã»ããã§ã¿ã¹ã¯ããŸããã£ãŠãã³ãããŒã¯ãè¡ãããšã«ããããã®ã¢ãããŒãã®ããã©ãŒãã³ã¹ãè©äŸ¡ããŸãã
OCRããããªå
ã®ã¢ã¯ã·ã§ã³èªèãå°ççäœçœ®ç¹å®ãããã³ããŸããŸãªçš®é¡ã®ãã现ãããªããžã§ã¯ãåé¡ãªã©ãã®
ã¢ãã«ã¯ã»ãšãã©ã®ã¿ã¹ã¯ã«ç°¡åã«ç§»è¡ã§ããå€ãã®å Žåãå¿
èŠããªããŠãå®å
šã«ç£èŠãããããŒã¹ã©ã€ã³ãšç«¶åããŸãã
ããŒã¿ã»ããåºæã®ãã¬ãŒãã³ã°ã«é©ããŠããŸããããšãã°ãImageNet ãŒãã·ã§ããã§ã¯ãªãªãžãã«ã® ResNet-50 ã®ç²ŸåºŠãšäžèŽããŸãã
ãã¬ãŒãã³ã°ã«äœ¿çšããã 128 äžã®ãã¬ãŒãã³ã° ãµã³ãã«ã䜿çšããå¿
èŠã¯ãããŸãããã³ãŒãããªãªãŒã¹ããäºåãã¬ãŒãã³ã°æžã¿
ã¢ãã«ã®éã¿ã¯ãã® https URL ã§ç¢ºèªã§ããŸãã*
ãã®ã¢ãã«ã¯ [valhalla](https://huggingface.co/valhalla) ã«ãã£ãŠæäŸãããŸãããå
ã®ã³ãŒã㯠[ãã](https://github.com/openai/CLIP) ã«ãããŸãã
## Usage tips and example
CLIP ã¯ããã«ãã¢ãŒãã«ãªããžã§ã³ããã³èšèªã¢ãã«ã§ããç»åãšããã¹ãã®é¡äŒŒæ§ããŒãã·ã§ããç»åã«äœ¿çšã§ããŸãã
åé¡ã CLIP ã¯ãViT ã®ãããªãã©ã³ã¹ãã©ãŒããŒã䜿çšããŠèŠèŠçç¹åŸŽãååŸããå æèšèªã¢ãã«ã䜿çšããŠããã¹ããååŸããŸã
ç¹åŸŽã次ã«ãããã¹ããšèŠèŠã®äž¡æ¹ã®ç¹åŸŽããåãæ¬¡å
ã®æœåšç©ºéã«æåœ±ãããŸããããã
æåœ±ãããç»åãšããã¹ãã®ç¹åŸŽéã®ç©ãåæ§ã®ã¹ã³ã¢ãšããŠäœ¿çšãããŸãã
ç»åã Transformer ãšã³ã³ãŒãã«äŸçµŠããããã«ãåç»åã¯åºå®ãµã€ãºã®éè€ããªããããã®ã·ãŒã±ã³ã¹ã«åå²ãããŸãã
ãããã¯ç·åœ¢ã«åã蟌ãŸããŸãã [CLS] ããŒã¯ã³ã¯ãã€ã¡ãŒãžå
šäœã®è¡šçŸãšããŠæ©èœããããã«è¿œå ãããŸããäœå®¶ãã¡
ãŸãã絶察äœçœ®åã蟌ã¿ã远å ããçµæãšããŠåŸããããã¯ãã«ã®ã·ãŒã±ã³ã¹ãæšæºã® Transformer ãšã³ã³ãŒãã«äŸçµŠããŸãã
[`CLIPImageProcessor`] ã䜿çšããŠãã¢ãã«ã®ç»åã®ãµã€ãºå€æŽ (ãŸãã¯åã¹ã±ãŒã«) ããã³æ£èŠåãè¡ãããšãã§ããŸãã
[`CLIPTokenizer`] ã¯ããã¹ãã®ãšã³ã³ãŒãã«äœ¿çšãããŸãã [`CLIPProcessor`] ã¯ã©ããããŸã
[`CLIPImageProcessor`] ãš [`CLIPTokenizer`] ãäž¡æ¹ã®åäžã€ã³ã¹ã¿ã³ã¹ã«çµ±å
ããã¹ãããšã³ã³ãŒãããŠç»åãæºåããŸããæ¬¡ã®äŸã¯ã次ã®ã¡ãœããã䜿çšããŠç»åãšããã¹ãã®é¡äŒŒæ§ã¹ã³ã¢ãååŸããæ¹æ³ã瀺ããŠããŸãã
[`CLIPProcessor`] ãš [`CLIPModel`]ã
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import CLIPProcessor, CLIPModel
>>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
>>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)
>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```
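また、テキストと画像それぞれの埋め込みだけを取り出したい場合は、`get_text_features` と `get_image_features` が使えます(上の例で作成した `model` / `processor` / `image` をそのまま使う想定の一例です)。
```python
>>> # テキスト側の埋め込みのみを取得する
>>> text_inputs = processor(text=["a photo of a cat"], return_tensors="pt", padding=True)
>>> text_features = model.get_text_features(**text_inputs)

>>> # 画像側の埋め込みのみを取得する
>>> image_inputs = processor(images=image, return_tensors="pt")
>>> image_features = model.get_image_features(**image_inputs)
```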
## Resources
CLIP ã䜿ãå§ããã®ã«åœ¹ç«ã€å
¬åŒ Hugging Face ããã³ã³ãã¥ãã㣠(ð ã§ç€ºãããŠãã) ãªãœãŒã¹ã®ãªã¹ãã
- [ãªã¢ãŒã ã»ã³ã·ã³ã° (è¡æ) ç»åãšãã£ãã·ã§ã³ã䜿çšãã CLIP ã®åŸ®èª¿æŽ](https://huggingface.co/blog/fine-tune-clip-rsicd)ã[RSICD ããŒã¿ã»ãã] ã䜿çšã㊠CLIP ã埮調æŽããæ¹æ³ã«é¢ããããã°æçš¿(https://github.com/201528014227051/RSICD_optimal) ãšãããŒã¿æ¡åŒµã«ããããã©ãŒãã³ã¹ã®å€åã®æ¯èŒã
- ãã® [ãµã³ãã« ã¹ã¯ãªãã](https://github.com/huggingface/transformers/tree/main/examples/pytorch/contrastive-image-text) ã¯ããã¬- [COCO ããŒã¿ã»ãã](https://cocodataset.org/#home) ã䜿çšããŠãã¬ãŒãã³ã°ãããããžã§ã³ããã³ããã¹ã ãšã³ã³ãŒããŒã
<PipelineTag pipeline="image-to-text"/>
- ç»åãã£ãã·ã§ã³ã®ããŒã æ€çŽ¢ã«ããæšè«ã«äºåãã¬ãŒãã³ã°æžã¿ CLIP ã䜿çšããæ¹æ³ã«é¢ãã [ããŒãããã¯](https://colab.research.google.com/drive/1tuoAC5F4sC7qid56Z0ap-stR3rwdk0ZV?usp=sharing)ã ð
**ç»åæ€çŽ¢**
- äºåãã¬ãŒãã³ã°ããã CLIP ã䜿çšããç»åæ€çŽ¢ãš MRR (å¹³åçžäºã©ã³ã¯) ã¹ã³ã¢ã®èšç®ã«é¢ãã [ããŒãããã¯](https://colab.research.google.com/drive/1bLVwVKpAndpEDHqjzxVPr_9nGrSbuOQd?usp=sharing)ã ð
- ç»åã®ååŸãšé¡äŒŒæ§ã¹ã³ã¢ã®è¡šç€ºã«é¢ãã [ããŒãããã¯](https://colab.research.google.com/github/deep-diver/image_search_with_natural_language/blob/main/notebooks/Image_Search_CLIP.ipynb)ã ð
- å€èšèª CLIP ã䜿çšããŠç»åãšããã¹ããåããã¯ãã«ç©ºéã«ãããã³ã°ããæ¹æ³ã«é¢ãã [ããŒãããã¯](https://colab.research.google.com/drive/1xO-wC_m_GNzgjIBQ4a4znvQkvDoZJvH4?usp=sharing)ã ð
- ã䜿çšããŠã»ãã³ãã£ã㯠ã€ã¡ãŒãžæ€çŽ¢ã§ CLIP ãå®è¡ããæ¹æ³ã«é¢ãã [ããŒãããã¯](https://colab.research.google.com/github/vivien000/clip-demo/blob/master/clip.ipynb#scrollTo=uzdFhRGqiWkR) [Unsplash](https://unsplash.com) ããã³ [TMDB](https://www.themoviedb.org/) ããŒã¿ã»ããã ð
**説æå¯èœæ§**
- å
¥åããŒã¯ã³ãšç»åã»ã°ã¡ã³ãã®é¡äŒŒæ§ãèŠèŠåããæ¹æ³ã«é¢ãã [ããŒãããã¯](https://colab.research.google.com/github/hila-chefer/Transformer-MM-Explainability/blob/main/CLIP_explainability.ipynb)ã ð
ããã«å«ãããªãœãŒã¹ã®éä¿¡ã«èå³ãããå Žåã¯ããæ°è»œã«ãã« ãªã¯ãšã¹ããéããŠãã ããã審æ»ãããŠããã ããŸãã
ãªãœãŒã¹ã¯ãæ¢åã®ãªãœãŒã¹ãè€è£œããã®ã§ã¯ãªããäœãæ°ãããã®ã瀺ãããšãçæ³çã§ãã
## CLIPConfig
[[autodoc]] CLIPConfig
- from_text_vision_configs
## CLIPTextConfig
[[autodoc]] CLIPTextConfig
## CLIPVisionConfig
[[autodoc]] CLIPVisionConfig
## CLIPTokenizer
[[autodoc]] CLIPTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## CLIPTokenizerFast
[[autodoc]] CLIPTokenizerFast
## CLIPImageProcessor
[[autodoc]] CLIPImageProcessor
- preprocess
## CLIPImageProcessorFast
[[autodoc]] CLIPImageProcessorFast
- preprocess
## CLIPFeatureExtractor
[[autodoc]] CLIPFeatureExtractor
## CLIPProcessor
[[autodoc]] CLIPProcessor
<frameworkcontent>
<pt>
## CLIPModel
[[autodoc]] CLIPModel
- forward
- get_text_features
- get_image_features
## CLIPTextModel
[[autodoc]] CLIPTextModel
- forward
## CLIPTextModelWithProjection
[[autodoc]] CLIPTextModelWithProjection
- forward
## CLIPVisionModelWithProjection
[[autodoc]] CLIPVisionModelWithProjection
- forward
## CLIPVisionModel
[[autodoc]] CLIPVisionModel
- forward
</pt>
<tf>
## TFCLIPModel
[[autodoc]] TFCLIPModel
- call
- get_text_features
- get_image_features
## TFCLIPTextModel
[[autodoc]] TFCLIPTextModel
- call
## TFCLIPVisionModel
[[autodoc]] TFCLIPVisionModel
- call
</tf>
<jax>
## FlaxCLIPModel
[[autodoc]] FlaxCLIPModel
- __call__
- get_text_features
- get_image_features
## FlaxCLIPTextModel
[[autodoc]] FlaxCLIPTextModel
- __call__
## FlaxCLIPTextModelWithProjection
[[autodoc]] FlaxCLIPTextModelWithProjection
- __call__
## FlaxCLIPVisionModel
[[autodoc]] FlaxCLIPVisionModel
- __call__
</jax>
</frameworkcontent>
| transformers/docs/source/ja/model_doc/clip.md/0 | {
"file_path": "transformers/docs/source/ja/model_doc/clip.md",
"repo_id": "transformers",
"token_count": 4574
} |