#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ast import builtins import difflib import inspect import logging import math import re from collections.abc import Mapping from importlib import import_module from types import ModuleType from typing import Any, Callable, Dict, List, Optional, Tuple import numpy as np import pandas as pd from .utils import BASE_BUILTIN_MODULES, truncate_content logger = logging.getLogger(__name__) class InterpreterError(ValueError): """ An error raised when the interpreter cannot evaluate a Python expression, due to syntax error or unsupported operations. """ pass ERRORS = { name: getattr(builtins, name) for name in dir(builtins) if isinstance(getattr(builtins, name), type) and issubclass(getattr(builtins, name), BaseException) } DEFAULT_MAX_LEN_OUTPUT = 50000 MAX_OPERATIONS = 10000000 MAX_WHILE_ITERATIONS = 1000000 def custom_print(*args): return None BASE_PYTHON_TOOLS = { "print": custom_print, "isinstance": isinstance, "range": range, "float": float, "int": int, "bool": bool, "str": str, "set": set, "list": list, "dict": dict, "tuple": tuple, "round": round, "ceil": math.ceil, "floor": math.floor, "log": math.log, "exp": math.exp, "sin": math.sin, "cos": math.cos, "tan": math.tan, "asin": math.asin, "acos": math.acos, "atan": math.atan, "atan2": math.atan2, "degrees": math.degrees, "radians": math.radians, "pow": math.pow, "sqrt": math.sqrt, "len": len, "sum": sum, "max": max, "min": min, "abs": abs, "enumerate": enumerate, "zip": zip, "reversed": reversed, "sorted": sorted, "all": all, "any": any, "map": map, "filter": filter, "ord": ord, "chr": chr, "next": next, "iter": iter, "divmod": divmod, "callable": callable, "getattr": getattr, "hasattr": hasattr, "setattr": setattr, "issubclass": issubclass, "type": type, "complex": complex, } class PrintContainer: def __init__(self): self.value = "" def append(self, text): self.value += text return self def __iadd__(self, other): """Implements the += operator""" self.value += str(other) return self def __str__(self): """String representation""" return self.value def __repr__(self): """Representation for debugging""" return f"PrintContainer({self.value})" def __len__(self): """Implements len() function support""" return len(self.value) class BreakException(Exception): pass class ContinueException(Exception): pass class ReturnException(Exception): def __init__(self, value): self.value = value def get_iterable(obj): if isinstance(obj, list): return obj elif hasattr(obj, "__iter__"): return list(obj) else: raise InterpreterError("Object is not iterable") def fix_final_answer_code(code: str) -> str: """ Sometimes an LLM can try to assign a variable to final_answer, which would break the final_answer() tool. This function fixes this behaviour by replacing variable assignments to final_answer with final_answer_variable, while preserving function calls to final_answer(). 
""" # First, find if there's a direct assignment to final_answer # Use word boundary and negative lookbehind to ensure it's not an object attribute assignment_pattern = r"(?<!\.)(?<!\w)\bfinal_answer\s*=" if "final_answer(" not in code or not re.search(assignment_pattern, code): # If final_answer tool is not called in this blob, then doing the replacement is hazardous because it could false the model's memory for next steps. # Let's not modify the code and leave the subsequent assignment error happen. return code # Pattern for replacing variable assignments # Looks for 'final_answer' followed by '=' with optional whitespace # Negative lookbehind ensures we don't match object attributes assignment_regex = r"(?<!\.)(?<!\w)(\bfinal_answer)(\s*=)" code = re.sub(assignment_regex, r"final_answer_variable\2", code) # Pattern for replacing variable usage but not function calls # Negative lookahead (?!\s*\() ensures we don't match function calls # Negative lookbehind (?<!\.|\w) ensures we don't match object methods or other variables variable_regex = r"(?<!\.)(?<!\w)(\bfinal_answer\b)(?!\s*\()" code = re.sub(variable_regex, "final_answer_variable", code) return code def evaluate_unaryop( expression: ast.UnaryOp, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Any: operand = evaluate_ast(expression.operand, state, static_tools, custom_tools, authorized_imports) if isinstance(expression.op, ast.USub): return -operand elif isinstance(expression.op, ast.UAdd): return operand elif isinstance(expression.op, ast.Not): return not operand elif isinstance(expression.op, ast.Invert): return ~operand else: raise InterpreterError(f"Unary operation {expression.op.__class__.__name__} is not supported.") def evaluate_lambda( lambda_expression: ast.Lambda, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Callable: args = [arg.arg for arg in lambda_expression.args.args] def lambda_func(*values: Any) -> Any: new_state = state.copy() for arg, value in zip(args, values): new_state[arg] = value return evaluate_ast( lambda_expression.body, new_state, static_tools, custom_tools, authorized_imports, ) return lambda_func def evaluate_while( while_loop: ast.While, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> None: iterations = 0 while evaluate_ast(while_loop.test, state, static_tools, custom_tools, authorized_imports): for node in while_loop.body: try: evaluate_ast(node, state, static_tools, custom_tools, authorized_imports) except BreakException: return None except ContinueException: break iterations += 1 if iterations > MAX_WHILE_ITERATIONS: raise InterpreterError(f"Maximum number of {MAX_WHILE_ITERATIONS} iterations in While loop exceeded") return None def create_function( func_def: ast.FunctionDef, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Callable: def new_func(*args: Any, **kwargs: Any) -> Any: func_state = state.copy() arg_names = [arg.arg for arg in func_def.args.args] default_values = [ evaluate_ast(d, state, static_tools, custom_tools, authorized_imports) for d in func_def.args.defaults ] # Apply default values defaults = dict(zip(arg_names[-len(default_values) :], default_values)) # Set positional arguments for name, value in zip(arg_names, args): func_state[name] = value # Set keyword 
arguments for name, value in kwargs.items(): func_state[name] = value # Handle variable arguments if func_def.args.vararg: vararg_name = func_def.args.vararg.arg func_state[vararg_name] = args if func_def.args.kwarg: kwarg_name = func_def.args.kwarg.arg func_state[kwarg_name] = kwargs # Set default values for arguments that were not provided for name, value in defaults.items(): if name not in func_state: func_state[name] = value # Update function state with self and __class__ if func_def.args.args and func_def.args.args[0].arg == "self": if args: func_state["self"] = args[0] func_state["__class__"] = args[0].__class__ result = None try: for stmt in func_def.body: result = evaluate_ast(stmt, func_state, static_tools, custom_tools, authorized_imports) except ReturnException as e: result = e.value if func_def.name == "__init__": return None return result return new_func def evaluate_function_def( func_def: ast.FunctionDef, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Callable: custom_tools[func_def.name] = create_function(func_def, state, static_tools, custom_tools, authorized_imports) return custom_tools[func_def.name] def evaluate_class_def( class_def: ast.ClassDef, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> type: class_name = class_def.name bases = [evaluate_ast(base, state, static_tools, custom_tools, authorized_imports) for base in class_def.bases] class_dict = {} for stmt in class_def.body: if isinstance(stmt, ast.FunctionDef): class_dict[stmt.name] = evaluate_function_def(stmt, state, static_tools, custom_tools, authorized_imports) elif isinstance(stmt, ast.Assign): for target in stmt.targets: if isinstance(target, ast.Name): class_dict[target.id] = evaluate_ast( stmt.value, state, static_tools, custom_tools, authorized_imports, ) elif isinstance(target, ast.Attribute): class_dict[target.attr] = evaluate_ast( stmt.value, state, static_tools, custom_tools, authorized_imports, ) else: raise InterpreterError(f"Unsupported statement in class body: {stmt.__class__.__name__}") new_class = type(class_name, tuple(bases), class_dict) state[class_name] = new_class return new_class def evaluate_augassign( expression: ast.AugAssign, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Any: def get_current_value(target: ast.AST) -> Any: if isinstance(target, ast.Name): return state.get(target.id, 0) elif isinstance(target, ast.Subscript): obj = evaluate_ast(target.value, state, static_tools, custom_tools, authorized_imports) key = evaluate_ast(target.slice, state, static_tools, custom_tools, authorized_imports) return obj[key] elif isinstance(target, ast.Attribute): obj = evaluate_ast(target.value, state, static_tools, custom_tools, authorized_imports) return getattr(obj, target.attr) elif isinstance(target, ast.Tuple): return tuple(get_current_value(elt) for elt in target.elts) elif isinstance(target, ast.List): return [get_current_value(elt) for elt in target.elts] else: raise InterpreterError("AugAssign not supported for {type(target)} targets.") current_value = get_current_value(expression.target) value_to_add = evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports) if isinstance(expression.op, ast.Add): if isinstance(current_value, list): if not isinstance(value_to_add, list): raise InterpreterError(f"Cannot add non-list 
value {value_to_add} to a list.") current_value += value_to_add else: current_value += value_to_add elif isinstance(expression.op, ast.Sub): current_value -= value_to_add elif isinstance(expression.op, ast.Mult): current_value *= value_to_add elif isinstance(expression.op, ast.Div): current_value /= value_to_add elif isinstance(expression.op, ast.Mod): current_value %= value_to_add elif isinstance(expression.op, ast.Pow): current_value **= value_to_add elif isinstance(expression.op, ast.FloorDiv): current_value //= value_to_add elif isinstance(expression.op, ast.BitAnd): current_value &= value_to_add elif isinstance(expression.op, ast.BitOr): current_value |= value_to_add elif isinstance(expression.op, ast.BitXor): current_value ^= value_to_add elif isinstance(expression.op, ast.LShift): current_value <<= value_to_add elif isinstance(expression.op, ast.RShift): current_value >>= value_to_add else: raise InterpreterError(f"Operation {type(expression.op).__name__} is not supported.") # Update the state: current_value has been updated in-place set_value( expression.target, current_value, state, static_tools, custom_tools, authorized_imports, ) return current_value def evaluate_boolop( node: ast.BoolOp, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> bool: if isinstance(node.op, ast.And): for value in node.values: if not evaluate_ast(value, state, static_tools, custom_tools, authorized_imports): return False return True elif isinstance(node.op, ast.Or): for value in node.values: if evaluate_ast(value, state, static_tools, custom_tools, authorized_imports): return True return False def evaluate_binop( binop: ast.BinOp, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Any: # Recursively evaluate the left and right operands left_val = evaluate_ast(binop.left, state, static_tools, custom_tools, authorized_imports) right_val = evaluate_ast(binop.right, state, static_tools, custom_tools, authorized_imports) # Determine the operation based on the type of the operator in the BinOp if isinstance(binop.op, ast.Add): return left_val + right_val elif isinstance(binop.op, ast.Sub): return left_val - right_val elif isinstance(binop.op, ast.Mult): return left_val * right_val elif isinstance(binop.op, ast.Div): return left_val / right_val elif isinstance(binop.op, ast.Mod): return left_val % right_val elif isinstance(binop.op, ast.Pow): return left_val**right_val elif isinstance(binop.op, ast.FloorDiv): return left_val // right_val elif isinstance(binop.op, ast.BitAnd): return left_val & right_val elif isinstance(binop.op, ast.BitOr): return left_val | right_val elif isinstance(binop.op, ast.BitXor): return left_val ^ right_val elif isinstance(binop.op, ast.LShift): return left_val << right_val elif isinstance(binop.op, ast.RShift): return left_val >> right_val else: raise NotImplementedError(f"Binary operation {type(binop.op).__name__} is not implemented.") def evaluate_assign( assign: ast.Assign, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Any: result = evaluate_ast(assign.value, state, static_tools, custom_tools, authorized_imports) if len(assign.targets) == 1: target = assign.targets[0] set_value(target, result, state, static_tools, custom_tools, authorized_imports) else: if len(assign.targets) != len(result): raise InterpreterError(f"Assign failed: expected 
{len(result)} values but got {len(assign.targets)}.") expanded_values = [] for tgt in assign.targets: if isinstance(tgt, ast.Starred): expanded_values.extend(result) else: expanded_values.append(result) for tgt, val in zip(assign.targets, expanded_values): set_value(tgt, val, state, static_tools, custom_tools, authorized_imports) return result def set_value( target: ast.AST, value: Any, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> None: if isinstance(target, ast.Name): if target.id in static_tools: raise InterpreterError(f"Cannot assign to name '{target.id}': doing this would erase the existing tool!") state[target.id] = value elif isinstance(target, ast.Tuple): if not isinstance(value, tuple): if hasattr(value, "__iter__") and not isinstance(value, (str, bytes)): value = tuple(value) else: raise InterpreterError("Cannot unpack non-tuple value") if len(target.elts) != len(value): raise InterpreterError("Cannot unpack tuple of wrong size") for i, elem in enumerate(target.elts): set_value(elem, value[i], state, static_tools, custom_tools, authorized_imports) elif isinstance(target, ast.Subscript): obj = evaluate_ast(target.value, state, static_tools, custom_tools, authorized_imports) key = evaluate_ast(target.slice, state, static_tools, custom_tools, authorized_imports) obj[key] = value elif isinstance(target, ast.Attribute): obj = evaluate_ast(target.value, state, static_tools, custom_tools, authorized_imports) setattr(obj, target.attr, value) def evaluate_call( call: ast.Call, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Any: if not ( isinstance(call.func, ast.Attribute) or isinstance(call.func, ast.Name) or isinstance(call.func, ast.Subscript) ): raise InterpreterError(f"This is not a correct function: {call.func}).") if isinstance(call.func, ast.Attribute): obj = evaluate_ast(call.func.value, state, static_tools, custom_tools, authorized_imports) func_name = call.func.attr if not hasattr(obj, func_name): raise InterpreterError(f"Object {obj} has no attribute {func_name}") func = getattr(obj, func_name) elif isinstance(call.func, ast.Name): func_name = call.func.id if func_name in state: func = state[func_name] elif func_name in static_tools: func = static_tools[func_name] elif func_name in custom_tools: func = custom_tools[func_name] elif func_name in ERRORS: func = ERRORS[func_name] else: raise InterpreterError( f"It is not permitted to evaluate other functions than the provided tools or functions defined/imported in previous code (tried to execute {call.func.id})." 
) elif isinstance(call.func, ast.Subscript): value = evaluate_ast(call.func.value, state, static_tools, custom_tools, authorized_imports) index = evaluate_ast(call.func.slice, state, static_tools, custom_tools, authorized_imports) if isinstance(value, (list, tuple)): func = value[index] else: raise InterpreterError(f"Cannot subscript object of type {type(value).__name__}") if not callable(func): raise InterpreterError(f"This is not a correct function: {call.func}).") func_name = None args = [] for arg in call.args: if isinstance(arg, ast.Starred): args.extend(evaluate_ast(arg.value, state, static_tools, custom_tools, authorized_imports)) else: args.append(evaluate_ast(arg, state, static_tools, custom_tools, authorized_imports)) kwargs = { keyword.arg: evaluate_ast(keyword.value, state, static_tools, custom_tools, authorized_imports) for keyword in call.keywords } if func_name == "super": if not args: if "__class__" in state and "self" in state: return super(state["__class__"], state["self"]) else: raise InterpreterError("super() needs at least one argument") cls = args[0] if not isinstance(cls, type): raise InterpreterError("super() argument 1 must be type") if len(args) == 1: return super(cls) elif len(args) == 2: instance = args[1] return super(cls, instance) else: raise InterpreterError("super() takes at most 2 arguments") else: if func_name == "print": state["_print_outputs"] += " ".join(map(str, args)) + "\n" return None else: # Assume it's a callable object if ( (inspect.getmodule(func) == builtins) and inspect.isbuiltin(func) and (func not in static_tools.values()) ): raise InterpreterError( f"Invoking a builtin function that has not been explicitly added as a tool is not allowed ({func_name})." ) return func(*args, **kwargs) def evaluate_subscript( subscript: ast.Subscript, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Any: index = evaluate_ast(subscript.slice, state, static_tools, custom_tools, authorized_imports) value = evaluate_ast(subscript.value, state, static_tools, custom_tools, authorized_imports) if isinstance(value, str) and isinstance(index, str): raise InterpreterError("You're trying to subscript a string with a string index, which is impossible") if isinstance(value, pd.core.indexing._LocIndexer): parent_object = value.obj return parent_object.loc[index] if isinstance(value, pd.core.indexing._iLocIndexer): parent_object = value.obj return parent_object.iloc[index] if isinstance(value, (pd.DataFrame, pd.Series, np.ndarray)): return value[index] elif isinstance(value, pd.core.groupby.generic.DataFrameGroupBy): return value[index] elif isinstance(index, slice): return value[index] elif isinstance(value, (list, tuple)): if not (-len(value) <= index < len(value)): raise InterpreterError(f"Index {index} out of bounds for list of length {len(value)}") return value[int(index)] elif isinstance(value, str): if not (-len(value) <= index < len(value)): raise InterpreterError(f"Index {index} out of bounds for string of length {len(value)}") return value[index] elif index in value: return value[index] else: error_message = f"Could not index {value} with '{index}'." 
if isinstance(index, str) and isinstance(value, Mapping): close_matches = difflib.get_close_matches(index, list(value.keys())) if len(close_matches) > 0: error_message += f" Maybe you meant one of these indexes instead: {str(close_matches)}" raise InterpreterError(error_message) def evaluate_name( name: ast.Name, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Any: if name.id in state: return state[name.id] elif name.id in static_tools: return static_tools[name.id] elif name.id in custom_tools: return custom_tools[name.id] elif name.id in ERRORS: return ERRORS[name.id] close_matches = difflib.get_close_matches(name.id, list(state.keys())) if len(close_matches) > 0: return state[close_matches[0]] raise InterpreterError(f"The variable `{name.id}` is not defined.") def evaluate_condition( condition: ast.Compare, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> bool: left = evaluate_ast(condition.left, state, static_tools, custom_tools, authorized_imports) comparators = [ evaluate_ast(c, state, static_tools, custom_tools, authorized_imports) for c in condition.comparators ] ops = [type(op) for op in condition.ops] result = True current_left = left for op, comparator in zip(ops, comparators): if op == ast.Eq: current_result = current_left == comparator elif op == ast.NotEq: current_result = current_left != comparator elif op == ast.Lt: current_result = current_left < comparator elif op == ast.LtE: current_result = current_left <= comparator elif op == ast.Gt: current_result = current_left > comparator elif op == ast.GtE: current_result = current_left >= comparator elif op == ast.Is: current_result = current_left is comparator elif op == ast.IsNot: current_result = current_left is not comparator elif op == ast.In: current_result = current_left in comparator elif op == ast.NotIn: current_result = current_left not in comparator else: raise InterpreterError(f"Operator not supported: {op}") result = result & current_result current_left = comparator if isinstance(result, bool) and not result: break return result if isinstance(result, (bool, pd.Series)) else result.all() def evaluate_if( if_statement: ast.If, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Any: result = None test_result = evaluate_ast(if_statement.test, state, static_tools, custom_tools, authorized_imports) if test_result: for line in if_statement.body: line_result = evaluate_ast(line, state, static_tools, custom_tools, authorized_imports) if line_result is not None: result = line_result else: for line in if_statement.orelse: line_result = evaluate_ast(line, state, static_tools, custom_tools, authorized_imports) if line_result is not None: result = line_result return result def evaluate_for( for_loop: ast.For, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Any: result = None iterator = evaluate_ast(for_loop.iter, state, static_tools, custom_tools, authorized_imports) for counter in iterator: set_value( for_loop.target, counter, state, static_tools, custom_tools, authorized_imports, ) for node in for_loop.body: try: line_result = evaluate_ast(node, state, static_tools, custom_tools, authorized_imports) if line_result is not None: result = line_result except BreakException: break except ContinueException: continue 
else: continue break return result def evaluate_listcomp( listcomp: ast.ListComp, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> List[Any]: def inner_evaluate(generators: List[ast.comprehension], index: int, current_state: Dict[str, Any]) -> List[Any]: if index >= len(generators): return [ evaluate_ast( listcomp.elt, current_state, static_tools, custom_tools, authorized_imports, ) ] generator = generators[index] iter_value = evaluate_ast( generator.iter, current_state, static_tools, custom_tools, authorized_imports, ) result = [] for value in iter_value: new_state = current_state.copy() if isinstance(generator.target, ast.Tuple): for idx, elem in enumerate(generator.target.elts): new_state[elem.id] = value[idx] else: new_state[generator.target.id] = value if all( evaluate_ast(if_clause, new_state, static_tools, custom_tools, authorized_imports) for if_clause in generator.ifs ): result.extend(inner_evaluate(generators, index + 1, new_state)) return result return inner_evaluate(listcomp.generators, 0, state) def evaluate_try( try_node: ast.Try, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> None: try: for stmt in try_node.body: evaluate_ast(stmt, state, static_tools, custom_tools, authorized_imports) except Exception as e: matched = False for handler in try_node.handlers: if handler.type is None or isinstance( e, evaluate_ast(handler.type, state, static_tools, custom_tools, authorized_imports), ): matched = True if handler.name: state[handler.name] = e for stmt in handler.body: evaluate_ast(stmt, state, static_tools, custom_tools, authorized_imports) break if not matched: raise e else: if try_node.orelse: for stmt in try_node.orelse: evaluate_ast(stmt, state, static_tools, custom_tools, authorized_imports) finally: if try_node.finalbody: for stmt in try_node.finalbody: evaluate_ast(stmt, state, static_tools, custom_tools, authorized_imports) def evaluate_raise( raise_node: ast.Raise, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> None: if raise_node.exc is not None: exc = evaluate_ast(raise_node.exc, state, static_tools, custom_tools, authorized_imports) else: exc = None if raise_node.cause is not None: cause = evaluate_ast(raise_node.cause, state, static_tools, custom_tools, authorized_imports) else: cause = None if exc is not None: if cause is not None: raise exc from cause else: raise exc else: raise InterpreterError("Re-raise is not supported without an active exception") def evaluate_assert( assert_node: ast.Assert, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> None: test_result = evaluate_ast(assert_node.test, state, static_tools, custom_tools, authorized_imports) if not test_result: if assert_node.msg: msg = evaluate_ast(assert_node.msg, state, static_tools, custom_tools, authorized_imports) raise AssertionError(msg) else: # Include the failing condition in the assertion message test_code = ast.unparse(assert_node.test) raise AssertionError(f"Assertion failed: {test_code}") def evaluate_with( with_node: ast.With, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> None: contexts = [] for item in with_node.items: context_expr = evaluate_ast(item.context_expr, state, static_tools, 
custom_tools, authorized_imports) if item.optional_vars: state[item.optional_vars.id] = context_expr.__enter__() contexts.append(state[item.optional_vars.id]) else: context_var = context_expr.__enter__() contexts.append(context_var) try: for stmt in with_node.body: evaluate_ast(stmt, state, static_tools, custom_tools, authorized_imports) except Exception as e: for context in reversed(contexts): context.__exit__(type(e), e, e.__traceback__) raise else: for context in reversed(contexts): context.__exit__(None, None, None) def get_safe_module(raw_module, dangerous_patterns, authorized_imports, visited=None): """Creates a safe copy of a module or returns the original if it's a function""" # If it's a function or non-module object, return it directly if not isinstance(raw_module, ModuleType): return raw_module # Handle circular references: Initialize visited set for the first call if visited is None: visited = set() module_id = id(raw_module) if module_id in visited: return raw_module # Return original for circular refs visited.add(module_id) # Create new module for actual modules safe_module = ModuleType(raw_module.__name__) # Copy all attributes by reference, recursively checking modules for attr_name in dir(raw_module): # Skip dangerous patterns at any level if any( pattern in raw_module.__name__.split(".") + [attr_name] and pattern not in authorized_imports for pattern in dangerous_patterns ): logger.info(f"Skipping dangerous attribute {raw_module.__name__}.{attr_name}") continue try: attr_value = getattr(raw_module, attr_name) except ImportError as e: # lazy / dynamic loading module -> INFO log and skip logger.info( f"Skipping import error while copying {raw_module.__name__}.{attr_name}: {type(e).__name__} - {e}" ) continue # Recursively process nested modules, passing visited set if isinstance(attr_value, ModuleType): attr_value = get_safe_module(attr_value, dangerous_patterns, authorized_imports, visited=visited) setattr(safe_module, attr_name, attr_value) return safe_module def import_modules(expression, state, authorized_imports): dangerous_patterns = ( "_os", "os", "subprocess", "_subprocess", "pty", "system", "popen", "spawn", "shutil", "sys", "pathlib", "io", "socket", "compile", "eval", "exec", "multiprocessing", ) def check_module_authorized(module_name): if "*" in authorized_imports: return True else: module_path = module_name.split(".") if any([module in dangerous_patterns and module not in authorized_imports for module in module_path]): return False module_subpaths = [".".join(module_path[:i]) for i in range(1, len(module_path) + 1)] return any(subpath in authorized_imports for subpath in module_subpaths) if isinstance(expression, ast.Import): for alias in expression.names: if check_module_authorized(alias.name): raw_module = import_module(alias.name) state[alias.asname or alias.name] = get_safe_module(raw_module, dangerous_patterns, authorized_imports) else: raise InterpreterError( f"Import of {alias.name} is not allowed. 
Authorized imports are: {str(authorized_imports)}" ) return None elif isinstance(expression, ast.ImportFrom): if check_module_authorized(expression.module): raw_module = __import__(expression.module, fromlist=[alias.name for alias in expression.names]) module = get_safe_module(raw_module, dangerous_patterns, authorized_imports) if expression.names[0].name == "*": # Handle "from module import *" if hasattr(module, "__all__"): # If module has __all__, import only those names for name in module.__all__: state[name] = getattr(module, name) else: # If no __all__, import all public names (those not starting with '_') for name in dir(module): if not name.startswith("_"): state[name] = getattr(module, name) else: # regular from imports for alias in expression.names: if hasattr(module, alias.name): state[alias.asname or alias.name] = getattr(module, alias.name) else: raise InterpreterError(f"Module {expression.module} has no attribute {alias.name}") else: raise InterpreterError( f"Import from {expression.module} is not allowed. Authorized imports are: {str(authorized_imports)}" ) return None def evaluate_dictcomp( dictcomp: ast.DictComp, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> Dict[Any, Any]: result = {} for gen in dictcomp.generators: iter_value = evaluate_ast(gen.iter, state, static_tools, custom_tools, authorized_imports) for value in iter_value: new_state = state.copy() set_value( gen.target, value, new_state, static_tools, custom_tools, authorized_imports, ) if all( evaluate_ast(if_clause, new_state, static_tools, custom_tools, authorized_imports) for if_clause in gen.ifs ): key = evaluate_ast( dictcomp.key, new_state, static_tools, custom_tools, authorized_imports, ) val = evaluate_ast( dictcomp.value, new_state, static_tools, custom_tools, authorized_imports, ) result[key] = val return result def evaluate_delete( delete_node: ast.Delete, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str], ) -> None: """ Evaluate a delete statement (del x, del x[y]). Args: delete_node: The AST Delete node to evaluate state: The current state dictionary static_tools: Dictionary of static tools custom_tools: Dictionary of custom tools authorized_imports: List of authorized imports """ for target in delete_node.targets: if isinstance(target, ast.Name): # Handle simple variable deletion (del x) if target.id in state: del state[target.id] else: raise InterpreterError(f"Cannot delete name '{target.id}': name is not defined") elif isinstance(target, ast.Subscript): # Handle index/key deletion (del x[y]) obj = evaluate_ast(target.value, state, static_tools, custom_tools, authorized_imports) index = evaluate_ast(target.slice, state, static_tools, custom_tools, authorized_imports) try: del obj[index] except (TypeError, KeyError, IndexError) as e: raise InterpreterError(f"Cannot delete index/key: {str(e)}") else: raise InterpreterError(f"Deletion of {type(target).__name__} targets is not supported") def evaluate_ast( expression: ast.AST, state: Dict[str, Any], static_tools: Dict[str, Callable], custom_tools: Dict[str, Callable], authorized_imports: List[str] = BASE_BUILTIN_MODULES, ): """ Evaluate an abstract syntax tree using the content of the variables stored in a state and only evaluating a given set of functions. This function will recurse through the nodes of the tree provided. Args: expression (`ast.AST`): The code to evaluate, as an abstract syntax tree. 
state (`Dict[str, Any]`): A dictionary mapping variable names to values. The `state` is updated if need be when the evaluation encounters assignments. static_tools (`Dict[str, Callable]`): Functions that may be called during the evaluation. Trying to change one of these static_tools will raise an error. custom_tools (`Dict[str, Callable]`): Functions that may be called during the evaluation. These static_tools can be overwritten. authorized_imports (`List[str]`): The list of modules that can be imported by the code. By default, only a few safe modules are allowed. If it contains "*", it will authorize any import. Use this at your own risk! """ if state["_operations_count"] >= MAX_OPERATIONS: raise InterpreterError( f"Reached the max number of operations of {MAX_OPERATIONS}. Maybe there is an infinite loop somewhere in the code, or you're just asking too many calculations." ) state["_operations_count"] += 1 if isinstance(expression, ast.Assign): # Assignment -> we evaluate the assignment which should update the state # We return the variable assigned as it may be used to determine the final result. return evaluate_assign(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.AugAssign): return evaluate_augassign(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Call): # Function call -> we return the value of the function call return evaluate_call(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Constant): # Constant -> just return the value return expression.value elif isinstance(expression, ast.Tuple): return tuple( evaluate_ast(elt, state, static_tools, custom_tools, authorized_imports) for elt in expression.elts ) elif isinstance(expression, (ast.ListComp, ast.GeneratorExp)): return evaluate_listcomp(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.UnaryOp): return evaluate_unaryop(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Starred): return evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.BoolOp): # Boolean operation -> evaluate the operation return evaluate_boolop(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Break): raise BreakException() elif isinstance(expression, ast.Continue): raise ContinueException() elif isinstance(expression, ast.BinOp): # Binary operation -> execute operation return evaluate_binop(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Compare): # Comparison -> evaluate the comparison return evaluate_condition(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Lambda): return evaluate_lambda(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.FunctionDef): return evaluate_function_def(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Dict): # Dict -> evaluate all keys and values keys = [evaluate_ast(k, state, static_tools, custom_tools, authorized_imports) for k in expression.keys] values = [evaluate_ast(v, state, static_tools, custom_tools, authorized_imports) for v in expression.values] return dict(zip(keys, values)) elif isinstance(expression, ast.Expr): # Expression -> evaluate the content return 
evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.For): # For loop -> execute the loop return evaluate_for(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.FormattedValue): # Formatted value (part of f-string) -> evaluate the content and return return evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.If): # If -> execute the right branch return evaluate_if(expression, state, static_tools, custom_tools, authorized_imports) elif hasattr(ast, "Index") and isinstance(expression, ast.Index): return evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.JoinedStr): return "".join( [str(evaluate_ast(v, state, static_tools, custom_tools, authorized_imports)) for v in expression.values] ) elif isinstance(expression, ast.List): # List -> evaluate all elements return [evaluate_ast(elt, state, static_tools, custom_tools, authorized_imports) for elt in expression.elts] elif isinstance(expression, ast.Name): # Name -> pick up the value in the state return evaluate_name(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Subscript): # Subscript -> return the value of the indexing return evaluate_subscript(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.IfExp): test_val = evaluate_ast(expression.test, state, static_tools, custom_tools, authorized_imports) if test_val: return evaluate_ast(expression.body, state, static_tools, custom_tools, authorized_imports) else: return evaluate_ast(expression.orelse, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Attribute): value = evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports) return getattr(value, expression.attr) elif isinstance(expression, ast.Slice): return slice( evaluate_ast(expression.lower, state, static_tools, custom_tools, authorized_imports) if expression.lower is not None else None, evaluate_ast(expression.upper, state, static_tools, custom_tools, authorized_imports) if expression.upper is not None else None, evaluate_ast(expression.step, state, static_tools, custom_tools, authorized_imports) if expression.step is not None else None, ) elif isinstance(expression, ast.DictComp): return evaluate_dictcomp(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.While): return evaluate_while(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, (ast.Import, ast.ImportFrom)): return import_modules(expression, state, authorized_imports) elif isinstance(expression, ast.ClassDef): return evaluate_class_def(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Try): return evaluate_try(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Raise): return evaluate_raise(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Assert): return evaluate_assert(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.With): return evaluate_with(expression, state, static_tools, custom_tools, authorized_imports) elif isinstance(expression, ast.Set): return {evaluate_ast(elt, state, static_tools, 
custom_tools, authorized_imports) for elt in expression.elts} elif isinstance(expression, ast.Return): raise ReturnException( evaluate_ast(expression.value, state, static_tools, custom_tools, authorized_imports) if expression.value else None ) elif isinstance(expression, ast.Pass): return None elif isinstance(expression, ast.Delete): return evaluate_delete(expression, state, static_tools, custom_tools, authorized_imports) else: # For now we refuse anything else. Let's add things as we need them. raise InterpreterError(f"{expression.__class__.__name__} is not supported.") class FinalAnswerException(Exception): def __init__(self, value): self.value = value def evaluate_python_code( code: str, static_tools: Optional[Dict[str, Callable]] = None, custom_tools: Optional[Dict[str, Callable]] = None, state: Optional[Dict[str, Any]] = None, authorized_imports: List[str] = BASE_BUILTIN_MODULES, max_print_outputs_length: int = DEFAULT_MAX_LEN_OUTPUT, ): """ Evaluate a python expression using the content of the variables stored in a state and only evaluating a given set of functions. This function will recurse through the nodes of the tree provided. Args: code (`str`): The code to evaluate. static_tools (`Dict[str, Callable]`): The functions that may be called during the evaluation. These can also be agents in a multiagent setting. These tools cannot be overwritten in the code: any assignment to their name will raise an error. custom_tools (`Dict[str, Callable]`): The functions that may be called during the evaluation. These tools can be overwritten in the code: any assignment to their name will overwrite them. state (`Dict[str, Any]`): A dictionary mapping variable names to values. The `state` should contain the initial inputs but will be updated by this function to contain all variables as they are evaluated. The print outputs will be stored in the state under the key "_print_outputs". 
""" try: expression = ast.parse(code) except SyntaxError as e: raise InterpreterError( f"Code parsing failed on line {e.lineno} due to: {type(e).__name__}\n" f"{e.text}" f"{' ' * (e.offset or 0)}^\n" f"Error: {str(e)}" ) if state is None: state = {} static_tools = static_tools.copy() if static_tools is not None else {} custom_tools = custom_tools if custom_tools is not None else {} result = None state["_print_outputs"] = PrintContainer() state["_operations_count"] = 0 def final_answer(value): raise FinalAnswerException(value) static_tools["final_answer"] = final_answer try: for node in expression.body: result = evaluate_ast(node, state, static_tools, custom_tools, authorized_imports) state["_print_outputs"].value = truncate_content( str(state["_print_outputs"]), max_length=max_print_outputs_length ) is_final_answer = False return result, is_final_answer except FinalAnswerException as e: state["_print_outputs"].value = truncate_content( str(state["_print_outputs"]), max_length=max_print_outputs_length ) is_final_answer = True return e.value, is_final_answer except Exception as e: exception_type = type(e).__name__ state["_print_outputs"].value = truncate_content( str(state["_print_outputs"]), max_length=max_print_outputs_length ) raise InterpreterError( f"Code execution failed at line '{ast.get_source_segment(code, node)}' due to: {exception_type}:{str(e)}" ) class LocalPythonInterpreter: def __init__( self, additional_authorized_imports: List[str], tools: Dict, max_print_outputs_length: Optional[int] = None, ): self.custom_tools = {} self.state = {} self.max_print_outputs_length = max_print_outputs_length if max_print_outputs_length is None: self.max_print_outputs_length = DEFAULT_MAX_LEN_OUTPUT self.additional_authorized_imports = additional_authorized_imports self.authorized_imports = list(set(BASE_BUILTIN_MODULES) | set(self.additional_authorized_imports)) # Add base trusted tools to list self.static_tools = { **tools, **BASE_PYTHON_TOOLS.copy(), } # TODO: assert self.authorized imports are all installed locally def __call__(self, code_action: str, additional_variables: Dict) -> Tuple[Any, str, bool]: self.state.update(additional_variables) output, is_final_answer = evaluate_python_code( code_action, static_tools=self.static_tools, custom_tools=self.custom_tools, state=self.state, authorized_imports=self.authorized_imports, max_print_outputs_length=self.max_print_outputs_length, ) logs = str(self.state["_print_outputs"]) return output, logs, is_final_answer __all__ = ["evaluate_python_code", "LocalPythonInterpreter"]
smolagents/src/smolagents/local_python_executor.py/0
{ "file_path": "smolagents/src/smolagents/local_python_executor.py", "repo_id": "smolagents", "token_count": 22987 }
from unittest.mock import MagicMock, patch

from smolagents.e2b_executor import E2BExecutor


class TestE2BExecutor:
    def test_e2b_executor_instantiation(self):
        logger = MagicMock()
        with patch("e2b_code_interpreter.Sandbox") as mock_sandbox:
            mock_sandbox.return_value.commands.run.return_value.error = None
            mock_sandbox.return_value.run_code.return_value.error = None
            executor = E2BExecutor(additional_imports=[], tools=[], logger=logger)
            assert isinstance(executor, E2BExecutor)
            assert executor.logger == logger
            assert executor.final_answer is False
            assert executor.custom_tools == {}
            assert executor.final_answer_pattern.pattern == r"final_answer\((.*?)\)"
            assert executor.sbx == mock_sandbox.return_value
smolagents/tests/test_e2b_executor.py/0
{ "file_path": "smolagents/tests/test_e2b_executor.py", "repo_id": "smolagents", "token_count": 333 }
use std::fs;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("cargo:rerun-if-changed=../../proto/");

    fs::create_dir_all("src/v2/pb").unwrap_or(());
    let mut config = prost_build::Config::new();
    config.protoc_arg("--experimental_allow_proto3_optional");

    tonic_build::configure()
        .build_client(true)
        .build_server(false)
        .out_dir("src/v2/pb")
        .include_file("mod.rs")
        .compile_with_config(config, &["../../proto/generate.proto"], &["../../proto"])
        .map_err(|e| match e.kind() {
            std::io::ErrorKind::NotFound => {
                panic!("`protoc` not found, install libprotoc")
            }
            std::io::ErrorKind::Other => {
                panic!("`protoc` version unsupported, upgrade protoc: https://github.com/protocolbuffers/protobuf/releases")
            }
            e => e,
        })
        .unwrap_or_else(|e| panic!("protobuf compilation failed: {e}"));

    fs::create_dir_all("src/v3/pb").unwrap_or(());
    let mut config = prost_build::Config::new();
    config.protoc_arg("--experimental_allow_proto3_optional");

    tonic_build::configure()
        .build_client(true)
        .build_server(false)
        .out_dir("src/v3/pb")
        .include_file("mod.rs")
        .compile_with_config(config, &["../../proto/v3/generate.proto"], &["../../proto"])
        .unwrap_or_else(|e| panic!("protobuf compilation failed: {e}"));

    Ok(())
}
text-generation-inference/backends/client/build.rs/0
{ "file_path": "text-generation-inference/backends/client/build.rs", "repo_id": "text-generation-inference", "token_count": 624 }
set(TRT_INCLUDE_DIR ${TGI_TRTLLM_BACKEND_TRT_INCLUDE_DIR})
set(TRT_LIB_DIR ${TGI_TRTLLM_BACKEND_TRT_LIB_DIR})

set(USE_CXX11_ABI ON)
set(BUILD_PYT OFF)
set(BUILD_PYBIND OFF)
set(BUILD_MICRO_BENCHMARKS OFF)
set(BUILD_BENCHMARKS OFF)
set(BUILD_TESTS OFF)
set(CMAKE_CUDA_ARCHITECTURES ${TGI_TRTLLM_BACKEND_TARGET_CUDA_ARCH_LIST})

message(STATUS "Building for CUDA Architectures: ${CMAKE_CUDA_ARCHITECTURES}")

set(ENABLE_UCX OFF)
if (${CMAKE_BUILD_TYPE} STREQUAL "Debug")
    set(FAST_BUILD ON)
    set(NVTX_DISABLE ON)
    set(INDEX_RANGE_CHECK ON)
else ()
    set(FAST_BUILD OFF)
    set(FAST_MATH ON)
    set(NVTX_DISABLE OFF)
    set(INDEX_RANGE_CHECK OFF)
endif ()

find_package(Python3 REQUIRED Interpreter)

fetchcontent_declare(
        trtllm
        GIT_REPOSITORY https://github.com/nvidia/TensorRT-LLM.git
        GIT_TAG v0.17.0
        GIT_SHALLOW ON
        DOWNLOAD_EXTRACT_TIMESTAMP
)
fetchcontent_makeavailable(trtllm)

message(STATUS "Found TensorRT-LLM: ${trtllm_SOURCE_DIR}")
execute_process(COMMAND git lfs install WORKING_DIRECTORY "${trtllm_SOURCE_DIR}/")
execute_process(COMMAND git lfs pull WORKING_DIRECTORY "${trtllm_SOURCE_DIR}/")

# TensorRT-LLM uses a JIT-based *precompiled* library to generate some specific kernels; build the path to it here
set(TRTLLM_NVRTC_LIBRARY_NAME
        "${CMAKE_SHARED_LIBRARY_PREFIX}tensorrt_llm_nvrtc_wrapper${CMAKE_SHARED_LIBRARY_SUFFIX}"
        CACHE INTERNAL "nvrtc wrapper library name")
set(TRTLLM_NVRTC_WRAPPER_LIBRARY_PATH
        "${trtllm_SOURCE_DIR}/cpp/tensorrt_llm/kernels/decoderMaskedMultiheadAttention/decoderXQAImplJIT/nvrtcWrapper/${CMAKE_LIBRARY_ARCHITECTURE}/${TRTLLM_NVRTC_LIBRARY_NAME}"
        CACHE INTERNAL "nvrtc wrapper library path")

# Same thing for the executor static library
set(TRTLLM_EXECUTOR_STATIC_LIBRARY_NAME
        "${CMAKE_SHARED_LIBRARY_PREFIX}tensorrt_llm_executor_static${CMAKE_STATIC_LIBRARY_SUFFIX}"
        CACHE INTERNAL "executor_static library name")
set(TRTLLM_EXECUTOR_STATIC_LIBRARY_PATH
        "${trtllm_SOURCE_DIR}/cpp/tensorrt_llm/executor/${CMAKE_LIBRARY_ARCHITECTURE}/${TRTLLM_EXECUTOR_STATIC_LIBRARY_NAME}"
        CACHE INTERNAL "executor_static library path")
text-generation-inference/backends/trtllm/cmake/trtllm.cmake/0
{ "file_path": "text-generation-inference/backends/trtllm/cmake/trtllm.cmake", "repo_id": "text-generation-inference", "token_count": 976 }
mod backend;
pub mod block_allocator;
mod client;
mod queue;
pub mod radix;

use crate::client::{ClientError, ShardedClient};
pub(crate) use backend::BackendV3;
use serde::Serialize;
use thiserror::Error;
use utoipa::ToSchema;

#[derive(Clone, Debug, Serialize, ToSchema)]
pub struct BackendInfo {
    /// Mandatory
    #[schema(example = "cuda")]
    pub model_device_type: String,
    #[schema(example = "torch.float16")]
    pub model_dtype: String,

    /// Backend parameters
    #[schema(example = "1")]
    pub speculate: usize,
    #[schema(example = "1.2")]
    pub waiting_served_ratio: f32,
    #[schema(example = "32000")]
    pub max_batch_total_tokens: u32,
    #[schema(example = "20")]
    pub max_waiting_tokens: usize,
    #[schema(nullable = true, example = "null")]
    pub max_batch_size: Option<usize>,
    #[schema(example = "false")]
    pub support_chunking: bool,
    #[schema(example = "false")]
    pub prefix_caching: bool,
    #[schema(example = "flashinfer")]
    pub attention_impl: String,
    #[schema(example = "1")]
    pub block_size: u32,
    #[schema(example = "30000")]
    pub max_input_tokens: usize,
    #[schema(example = "32000")]
    pub max_total_tokens: usize,
}

#[allow(clippy::too_many_arguments)]
pub async fn connect_backend(
    max_input_tokens: Option<usize>,
    max_total_tokens: Option<usize>,
    master_shard_uds_path: String,
    waiting_served_ratio: f32,
    max_batch_prefill_tokens: u32,
    max_batch_total_tokens: Option<u32>,
    max_waiting_tokens: usize,
    max_batch_size: Option<usize>,
) -> Result<(BackendV3, BackendInfo), V3Error> {
    // Helper function
    let check_max_batch_total_tokens = |(
        max_supported_batch_total_tokens,
        shard_max_input_tokens,
        shard_max_total_tokens,
    ): (Option<u32>, u32, u32)|
     -> Result<(u32, usize, usize), V3Error> {
        if let Some(max_input_tokens) = max_input_tokens {
            assert_eq!(max_input_tokens as u32, shard_max_input_tokens);
        }
        if let Some(max_total_tokens) = max_total_tokens {
            assert_eq!(max_total_tokens as u32, shard_max_total_tokens);
        }
        match max_supported_batch_total_tokens {
            // Older models do not support automatic max-batch-total-tokens
            None => {
                let max_batch_total_tokens = max_batch_total_tokens.unwrap_or(
                    16000
                        .max(shard_max_total_tokens)
                        .max(max_batch_prefill_tokens),
                );
                tracing::warn!("Model does not support automatic max batch total tokens");
                Ok((
                    max_batch_total_tokens,
                    shard_max_input_tokens as usize,
                    shard_max_total_tokens as usize,
                ))
            }
            // Flash attention models return their max supported total tokens
            Some(max_supported_batch_total_tokens) => {
                // Warn if the user provided a max-batch-total-tokens value, as we will ignore it
                if max_batch_total_tokens.is_some() {
                    tracing::warn!(
                        "`--max-batch-total-tokens` is deprecated for Flash \
                         Attention models."
                    );
                    tracing::warn!(
                        "Inferred max batch total tokens: {max_supported_batch_total_tokens}"
                    );
                }
                if shard_max_total_tokens > max_supported_batch_total_tokens {
                    return Err(V3Error::NotEnoughMemory(shard_max_total_tokens as usize));
                }

                Ok((
                    max_supported_batch_total_tokens,
                    shard_max_input_tokens as usize,
                    shard_max_total_tokens as usize,
                ))
            }
        }
    };

    let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path)
        .await
        .map_err(V3Error::Connection)?;

    // server is running on v3
    // Clear the cache; useful if the webserver rebooted
    sharded_client
        .clear_cache(None)
        .await
        .map_err(V3Error::Cache)?;

    // Get info from the shard
    let shard_info = sharded_client.info().await.map_err(V3Error::Info)?;

    // Warmup model
    tracing::info!("Warming up model");
    let answer = sharded_client
        .warmup(
            max_input_tokens.map(|p| p as u32),
            max_batch_prefill_tokens,
            max_total_tokens.map(|p| p as u32),
            max_batch_size,
        )
        .await
        .map_err(V3Error::Warmup)?;
    let (max_batch_total_tokens, max_input_tokens, max_total_tokens) =
        check_max_batch_total_tokens(answer)?;

    tracing::info!("Setting max batch total tokens to {max_batch_total_tokens}");
    metrics::gauge!("tgi_batch_max_total_tokens").set(max_batch_total_tokens);

    let backend_info = BackendInfo {
        waiting_served_ratio,
        max_batch_total_tokens,
        max_input_tokens,
        max_total_tokens,
        max_waiting_tokens,
        max_batch_size,
        model_device_type: shard_info.device_type.clone(),
        model_dtype: shard_info.dtype.clone(),
        speculate: shard_info.speculate as usize,
        support_chunking: shard_info.support_chunking,
        prefix_caching: shard_info.use_prefix_caching,
        attention_impl: shard_info.attention_impl.clone(),
        block_size: shard_info.block_size,
    };

    let backend = BackendV3::new(
        sharded_client,
        waiting_served_ratio,
        max_batch_prefill_tokens,
        max_batch_total_tokens,
        max_waiting_tokens,
        max_batch_size,
        shard_info,
    );

    tracing::info!("Using backend V3");

    Ok((backend, backend_info))
}

#[derive(Debug, Error)]
pub enum V3Error {
    #[error("Unable to clear the Python model shards cache: {0}")]
    Cache(ClientError),
    #[error("Unable to connect to the Python model shards: {0}")]
    Connection(ClientError),
    #[error("Unable to get the Python model shards info: {0}")]
    Info(ClientError),
    #[error("Unable to warmup the Python model shards: {0}")]
    Warmup(ClientError),
    #[error("Not enough memory to handle `max_total_tokens={0}`")]
    NotEnoughMemory(usize),
}
text-generation-inference/backends/v3/src/lib.rs/0
{ "file_path": "text-generation-inference/backends/v3/src/lib.rs", "repo_id": "text-generation-inference", "token_count": 3087 }
- sections:
  - local: index
    title: Text Generation Inference
  - local: quicktour
    title: Quick Tour
  - local: supported_models
    title: Supported Models
  - local: installation_nvidia
    title: Using TGI with Nvidia GPUs
  - local: installation_amd
    title: Using TGI with AMD GPUs
  - local: installation_gaudi
    title: Using TGI with Intel Gaudi
  - local: installation_inferentia
    title: Using TGI with AWS Inferentia
  - local: installation_tpu
    title: Using TGI with Google TPUs
  - local: installation_intel
    title: Using TGI with Intel GPUs
  - local: installation
    title: Installation from source
  - local: multi_backend_support
    title: Multi-backend support
  - local: architecture
    title: Internal Architecture
  - local: usage_statistics
    title: Usage Statistics
  title: Getting started
- sections:
  - local: basic_tutorials/consuming_tgi
    title: Consuming TGI
  - local: basic_tutorials/preparing_model
    title: Preparing Model for Serving
  - local: basic_tutorials/gated_model_access
    title: Serving Private & Gated Models
  - local: basic_tutorials/using_cli
    title: Using TGI CLI
  - local: basic_tutorials/non_core_models
    title: Non-core Model Serving
  - local: basic_tutorials/safety
    title: Safety
  - local: basic_tutorials/using_guidance
    title: Using Guidance, JSON, tools
  - local: basic_tutorials/visual_language_models
    title: Visual Language Models
  - local: basic_tutorials/monitoring
    title: Monitoring TGI with Prometheus and Grafana
  - local: basic_tutorials/train_medusa
    title: Train Medusa
  title: Tutorials
- sections:
  - local: backends/trtllm
    title: TensorRT-LLM
  title: Backends
- sections:
  - local: reference/launcher
    title: All TGI CLI options
  - local: reference/metrics
    title: Exported Metrics
  - local: reference/api_reference
    title: API Reference
  title: Reference
- sections:
  - local: conceptual/chunking
    title: V3 update, caching and chunking
  - local: conceptual/streaming
    title: Streaming
  - local: conceptual/quantization
    title: Quantization
  - local: conceptual/tensor_parallelism
    title: Tensor Parallelism
  - local: conceptual/paged_attention
    title: PagedAttention
  - local: conceptual/safetensors
    title: Safetensors
  - local: conceptual/flash_attention
    title: Flash Attention
  - local: conceptual/speculation
    title: Speculation (Medusa, ngram)
  - local: conceptual/guidance
    title: How Guidance Works (via outlines)
  - local: conceptual/lora
    title: LoRA (Low-Rank Adaptation)
  - local: conceptual/external
    title: External Resources
  title: Conceptual Guides
text-generation-inference/docs/source/_toctree.yml/0
{ "file_path": "text-generation-inference/docs/source/_toctree.yml", "repo_id": "text-generation-inference", "token_count": 864 }
# Guidance

## What is Guidance?

Guidance is a feature that allows users to constrain the generation of a large language model with a specified grammar. This feature is particularly useful when you want to generate text that follows a specific structure, uses a specific set of words, or produces output in a specific format. A prominent example is a JSON grammar, where the model is forced to output valid JSON.

## How is it used?

Guidance can be implemented in many ways and the community is always finding new ways to use it. Here are some examples of how you can use guidance:

Technically, guidance can be used to generate:

- a specific JSON object
- a function signature
- typed output like a list of integers

However, these use cases can span a wide range of applications, such as:

- extracting structured data from unstructured text
- summarizing text into a specific format
- limiting output to specific classes of words (acting as an LLM-powered classifier)
- generating the input to specific APIs or services
- providing reliable and consistent output for downstream tasks
- extracting data from multimodal inputs

## How does it work?

Diving into the details, guidance is enabled by including a grammar with a generation request. The grammar is compiled and then used to constrain which tokens the model is allowed to choose.

This process can be broken down into the following steps:

1. A request is sent to the backend, where it is processed and placed in a batch. Processing includes compiling the grammar into a finite state machine and initializing a grammar state.

<div class="flex justify-center">
    <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/request-to-batch.gif" />
    <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/request-to-batch-dark.gif" />
</div>

2. The model does a forward pass over the batch. This returns probabilities for each token in the vocabulary for each request in the batch.

3. The process of choosing one of those tokens is called `sampling`. The model samples from the distribution of probabilities to choose the next token. In TGI, all of the steps that run before sampling are called `processor`s. Grammars are applied as a processor that masks out tokens that are not allowed by the grammar.

<div class="flex justify-center">
    <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/logit-grammar-mask.gif" />
    <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/logit-grammar-mask-dark.gif" />
</div>

4. The grammar mask is applied and the model samples from the remaining tokens. Once a token is chosen, we update the grammar state with the new token to prepare it for the next pass.

<div class="flex justify-center">
    <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/sample-logits.gif" />
    <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/sample-logits-dark.gif" />
</div>

## How to use Guidance?

There are two main ways to use guidance; you can either use the `/generate` endpoint with a grammar or use the `/chat/completion` endpoint with tools. Under the hood, tools are a special case of grammars that allows the model to choose one or none of the provided tools.
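For example, a request to the `/generate` endpoint with a JSON grammar might look like the sketch below. It assumes a TGI server running locally on port 8080, and the schema and prompt are placeholders; treat it as an illustrative sketch rather than a definitive reference.

```python
import requests

# Hypothetical local TGI endpoint; adjust host/port to your deployment.
TGI_URL = "http://localhost:8080/generate"

# A small JSON schema the model must follow.
schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "age": {"type": "integer"},
    },
    "required": ["name", "age"],
}

payload = {
    "inputs": "Extract the person described: 'Ada Lovelace was 36 years old.'",
    "parameters": {
        "max_new_tokens": 64,
        # The grammar constrains generation to valid instances of the schema.
        "grammar": {"type": "json", "value": schema},
    },
}

response = requests.post(TGI_URL, json=payload, timeout=60)
print(response.json()["generated_text"])
```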
Please refer to [using guidance](../basic_tutorials/using_guidance) for more examples and details on how to use guidance in Python, JavaScript, and cURL.

### Getting the most out of guidance

Depending on how you are using guidance, you may want to make use of different features. Here are some tips to get the most out of guidance:

- If you are using the `/generate` endpoint with a `grammar`, it is recommended to include the grammar in the prompt, prefixed by something like `Please use the following JSON schema to generate the output:`. This will help the model understand the context of the grammar and generate the output accordingly.
- If you are getting a response with many repeated tokens, use the `frequency_penalty` or `repetition_penalty` parameters to reduce the number of repeated tokens in the output.
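As a rough illustration of both tips, the sketch below repeats the schema in the prompt and sets a mild `repetition_penalty`. The server URL and schema are placeholders, and the request shape mirrors the earlier example rather than an authoritative reference.

```python
import json

import requests

TGI_URL = "http://localhost:8080/generate"  # placeholder deployment URL

schema = {
    "type": "object",
    "properties": {"city": {"type": "string"}, "population": {"type": "integer"}},
    "required": ["city", "population"],
}

# Tip 1: repeat the schema in the prompt so the model has context for the grammar.
prompt = (
    "Please use the following JSON schema to generate the output:\n"
    f"{json.dumps(schema)}\n"
    "Describe the largest city in France."
)

payload = {
    "inputs": prompt,
    "parameters": {
        "max_new_tokens": 64,
        "grammar": {"type": "json", "value": schema},
        # Tip 2: a mild repetition penalty discourages runs of repeated tokens.
        "repetition_penalty": 1.1,
    },
}

print(requests.post(TGI_URL, json=payload, timeout=60).json()["generated_text"])
```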
text-generation-inference/docs/source/conceptual/guidance.md/0
{ "file_path": "text-generation-inference/docs/source/conceptual/guidance.md", "repo_id": "text-generation-inference", "token_count": 1237 }
# Multi-backend support TGI (Text Generation Inference) offers flexibility by supporting multiple backends for serving large language models (LLMs). With multi-backend support, you can choose the backend that best suits your needs, whether you prioritize performance, ease of use, or compatibility with specific hardware. API interaction with TGI remains consistent across backends, allowing you to switch between them seamlessly. **Supported backends:** * **TGI CUDA backend**: This high-performance backend is optimized for NVIDIA GPUs and serves as the default option within TGI. Developed in-house, it boasts numerous optimizations and is used in production by various projects, including those by Hugging Face. * **[TGI TRTLLM backend](./backends/trtllm)**: This backend leverages NVIDIA's TensorRT library to accelerate LLM inference. It utilizes specialized optimizations and custom kernels for enhanced performance. However, it requires a model-specific compilation step for each GPU architecture.
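As a rough illustration of this consistent API surface, a client call like the following sketch works the same way regardless of which backend serves the model. The endpoint URL is a placeholder for your own deployment, and the snippet assumes the `huggingface_hub` client is installed.

```python
from huggingface_hub import InferenceClient

# Point the client at any running TGI instance; the backend (CUDA, TRT-LLM, ...)
# does not change how the HTTP API is consumed.
client = InferenceClient("http://localhost:8080")

print(client.text_generation("What is Deep Learning?", max_new_tokens=20))
```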
text-generation-inference/docs/source/multi_backend_support.md/0
{ "file_path": "text-generation-inference/docs/source/multi_backend_support.md", "repo_id": "text-generation-inference", "token_count": 223 }
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 29946, "logprob": -1.4765625, "special": false, "text": "4" }, { "id": 29906, "logprob": -0.9199219, "special": false, "text": "2" }, { "id": 29889, "logprob": 0.0, "special": false, "text": "." }, { "id": 29896, "logprob": -1.1367188, "special": false, "text": "1" }, { "id": 29889, "logprob": -1.4648438, "special": false, "text": "." }, { "id": 29896, "logprob": -0.40722656, "special": false, "text": "1" }, { "id": 29889, "logprob": -0.17419434, "special": false, "text": "." }, { "id": 29896, "logprob": -0.20251465, "special": false, "text": "1" }, { "id": 29900, "logprob": -1.5527344, "special": false, "text": "0" }, { "id": 29896, "logprob": -1.3710938, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": "42.1.1.101" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar_regex.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar_regex.json", "repo_id": "text-generation-inference", "token_count": 860 }
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": 0, "tokens": [ { "id": 1313, "logprob": -2.3613281, "special": false, "text": "It" }, { "id": 3969, "logprob": -0.7285156, "special": false, "text": " seems" }, { "id": 298, "logprob": -1.3466797, "special": false, "text": " to" }, { "id": 528, "logprob": 0.0, "special": false, "text": " me" }, { "id": 28725, "logprob": -1.6757812, "special": false, "text": "," }, { "id": 369, "logprob": 0.0, "special": false, "text": " that" }, { "id": 513, "logprob": -1.1269531, "special": false, "text": " if" }, { "id": 368, "logprob": 0.0, "special": false, "text": " you" }, { "id": 28742, "logprob": -2.4921875, "special": false, "text": "'" }, { "id": 267, "logprob": 0.0, "special": false, "text": "re" } ], "top_tokens": null }, "generated_text": "What is gradient descent?\n\nIt seems to me, that if you're" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_mixtral/test_flash_mixtral_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_mixtral/test_flash_mixtral_all_params.json", "repo_id": "text-generation-inference", "token_count": 858 }
{ "details": { "best_of_sequences": null, "finish_reason": "stop_sequence", "generated_tokens": 6, "prefill": [], "seed": 0, "tokens": [ { "id": 284, "logprob": -0.28955078, "special": false, "text": " to" }, { "id": 3758, "logprob": -0.7739258, "special": false, "text": " send" }, { "id": 1366, "logprob": -0.85253906, "special": false, "text": " data" }, { "id": 625, "logprob": -0.8984375, "special": false, "text": " over" }, { "id": 257, "logprob": -1.0830078, "special": false, "text": " a" }, { "id": 3127, "logprob": -1.9404297, "special": false, "text": " network" } ], "top_tokens": null }, "generated_text": "Test request to send data over a network" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_all_params.json", "repo_id": "text-generation-inference", "token_count": 568 }
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": 0, "tokens": [ { "id": 288, "logprob": -0.2854004, "special": false, "text": "ing" }, { "id": 264, "logprob": -0.38061523, "special": false, "text": " a" }, { "id": 633, "logprob": -0.09301758, "special": false, "text": " new" }, { "id": 4480, "logprob": -0.26782227, "special": false, "text": " feature" }, { "id": 297, "logprob": -0.8510742, "special": false, "text": " in" }, { "id": 272, "logprob": -0.13464355, "special": false, "text": " the" }, { "id": 2039, "logprob": 0.0, "special": false, "text": " game" }, { "id": 28723, "logprob": -0.89990234, "special": false, "text": "." }, { "id": 13, "logprob": 0.0, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.10632324, "special": false, "text": "\n" } ], "top_tokens": null }, "generated_text": "Test requesting a new feature in the game.\n\n" }
text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_next_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_next_all_params.json", "repo_id": "text-generation-inference", "token_count": 860 }
import pytest @pytest.fixture(scope="module") def compressed_tensors_w8an_handle(launcher): with launcher( "neuralmagic/Llama-3.2-1B-Instruct-FP8", num_shard=2, quantize="compressed-tensors", ) as handle: yield handle @pytest.fixture(scope="module") async def compressed_tensors_w8an(compressed_tensors_w8an_handle): await compressed_tensors_w8an_handle.health(300) return compressed_tensors_w8an_handle.client @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_compressed_tensors_w8an(compressed_tensors_w8an, response_snapshot): response = await compressed_tensors_w8an.generate( "What is deep learning?", max_new_tokens=10, decoder_input_details=True, ) assert ( response.generated_text == " Deep learning is a type of artificial intelligence (AI" ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio async def test_compressed_tensors_w8an_all_params( compressed_tensors_w8an, response_snapshot ): response = await compressed_tensors_w8an.generate( "What is deep learning", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 10 assert ( response.generated_text == "What is deep learning?\nDeep learning, also known as neural network or" ) assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_compressed_tensors_w8an_load( compressed_tensors_w8an, generate_load, response_snapshot ): responses = await generate_load( compressed_tensors_w8an, "What is deep learning?", max_new_tokens=10, n=4, ) assert ( responses[0].generated_text == " Deep learning is a type of artificial intelligence (AI" ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_compressed_tensors_w8an_fp.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_compressed_tensors_w8an_fp.py", "repo_id": "text-generation-inference", "token_count": 1000 }
import pytest @pytest.fixture(scope="module") def flash_llama_fp8_kv_cache_handle(launcher): with launcher( "neuralmagic/Meta-Llama-3-8B-Instruct-FP8-KV", num_shard=2, kv_cache_dtype="fp8_e4m3fn", ) as handle: yield handle @pytest.fixture(scope="module") async def flash_llama_fp8_kv_cache(flash_llama_fp8_kv_cache_handle): await flash_llama_fp8_kv_cache_handle.health(300) return flash_llama_fp8_kv_cache_handle.client @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_fp8_kv_cache(flash_llama_fp8_kv_cache, response_snapshot): response = await flash_llama_fp8_kv_cache.generate( "What is deep learning?", max_new_tokens=10, decoder_input_details=True ) assert ( response.generated_text == " Deep learning is a subset of machine learning that involves" ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_fp8_kv_cache_all_params( flash_llama_fp8_kv_cache, response_snapshot ): response = await flash_llama_fp8_kv_cache.generate( "What is deep learning?", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_fp8_kv_cache_load( flash_llama_fp8_kv_cache, generate_load, response_snapshot ): responses = await generate_load( flash_llama_fp8_kv_cache, "What is deep learning?", max_new_tokens=10, n=4 ) assert len(responses) == 4 assert ( responses[0].generated_text == " Deep learning is a subset of machine learning that involves" ) assert all( [r.generated_text == responses[0].generated_text for r in responses] ), f"Different messages : {[r.generated_text for r in responses]}" assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_llama_fp8_kv_cache.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_llama_fp8_kv_cache.py", "repo_id": "text-generation-inference", "token_count": 986 }
import pytest @pytest.fixture(scope="module") def flash_phi35_moe_handle(launcher): with launcher( "microsoft/Phi-3.5-MoE-instruct", num_shard=4, ) as handle: yield handle @pytest.fixture(scope="module") async def flash_phi35_moe(flash_phi35_moe_handle): await flash_phi35_moe_handle.health(300) return flash_phi35_moe_handle.client @pytest.mark.asyncio async def test_flash_phi35_moe(flash_phi35_moe, response_snapshot): response = await flash_phi35_moe.generate( "What is gradient descent?\n\n", max_new_tokens=10, decoder_input_details=True ) assert response.details.generated_tokens == 10 assert ( response.generated_text == "Gradient descent is an optimization algorithm commonly used in" ) assert response == response_snapshot @pytest.mark.asyncio async def test_flash_phi35_moe_all_params(flash_phi35_moe, response_snapshot): response = await flash_phi35_moe.generate( "What is gradient descent?\n", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 10 assert ( response.generated_text == "What is gradient descent?\nGradient Descent (GD) is an" ) assert response == response_snapshot @pytest.mark.asyncio async def test_flash_phi35_moe_load(flash_phi35_moe, generate_load, response_snapshot): responses = await generate_load( flash_phi35_moe, "What is gradient descent?\n\n", max_new_tokens=10, n=4 ) assert len(responses) == 4 assert responses[0].details.generated_tokens == 10 assert ( responses[0].generated_text == "Gradient descent is an optimization algorithm commonly used in" ) assert all( [r.generated_text == responses[0].generated_text for r in responses] ), f"{[r.generated_text for r in responses]}" assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_phi35_moe.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_phi35_moe.py", "repo_id": "text-generation-inference", "token_count": 921 }
import pytest
import asyncio


@pytest.fixture(scope="module")
def mllama_handle(launcher):
    with launcher(
        "meta-llama/Llama-3.2-11B-Vision-Instruct",
        num_shard=2,
    ) as handle:
        yield handle


@pytest.fixture(scope="module")
async def mllama(mllama_handle):
    await mllama_handle.health(300)
    return mllama_handle.client


@pytest.mark.asyncio
async def test_mllama_simpl(mllama, response_snapshot):
    response = await mllama.chat(
        max_tokens=10,
        temperature=0.0,
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "Can you tell me a very short story based on the image?",
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": "https://raw.githubusercontent.com/huggingface/text-generation-inference/main/integration-tests/images/chicken_on_money.png"
                        },
                    },
                ],
            },
        ],
    )

    assert response.usage == {
        "completion_tokens": 10,
        "prompt_tokens": 50,
        "total_tokens": 60,
    }
    assert (
        response.choices[0].message.content == "In a small town, a chicken named Cluck"
    )
    assert response == response_snapshot


@pytest.mark.release
@pytest.mark.asyncio
async def test_mllama_load(mllama, generate_load, response_snapshot):
    futures = [
        mllama.chat(
            max_tokens=10,
            temperature=0.0,
            messages=[
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": "Can you tell me a very short story based on the image?",
                        },
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://raw.githubusercontent.com/huggingface/text-generation-inference/main/integration-tests/images/chicken_on_money.png"
                            },
                        },
                    ],
                },
            ],
        )
        # TODO: with v3, 4 concurrent requests break here. Nothing accounts for the
        # image VRAM because mllama is the only one doing its own thing.
        for _ in range(2)
    ]
    responses = await asyncio.gather(*futures)

    generated_texts = [response.choices[0].message.content for response in responses]

    # XXX: TODO: Fix this test.
    assert generated_texts[0] == "In a small town, a chicken named Cluck"
    assert len(generated_texts) == 2
    assert all(
        [text == generated_texts[0] for text in generated_texts]
    ), f"Different messages : {generated_texts}"

    assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_mllama.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_mllama.py", "repo_id": "text-generation-inference", "token_count": 1587 }
pub fn get_cuda_capability() -> Option<(usize, usize)> { use pyo3::prelude::*; let py_get_capability = |py: Python| -> PyResult<(isize, isize)> { let torch = py.import_bound("torch.cuda")?; let get_device_capability = torch.getattr("get_device_capability")?; get_device_capability.call0()?.extract() }; match pyo3::Python::with_gil(py_get_capability) { Ok((major, minor)) if major < 0 || minor < 0 => { tracing::warn!("Ignoring negative GPU compute capabilities: {major}.{minor}"); None } Ok((major, minor)) => Some((major as usize, minor as usize)), Err(err) => { tracing::warn!("Cannot determine GPU compute capability: {}", err); None } } }
text-generation-inference/launcher/src/gpu.rs/0
{ "file_path": "text-generation-inference/launcher/src/gpu.rs", "repo_id": "text-generation-inference", "token_count": 350 }
final: prev: { # You can use this overlay to temporarily override packages for # development. For permanent overrides, it's better to do this in # our package flake: # # https://github.com/huggingface/text-generation-inference-nix # # Note that overriding packages that are in the transitive closure # of many other packages (e.g. transformers) will require a large # rebuild. pythonPackagesExtensions = prev.pythonPackagesExtensions ++ [ ( python-self: python-super: with python-self; { # Python package override example: # transformers = python-super.transformers.overrideAttrs ( # _: _: { # src = final.fetchFromGitHub { # owner = "huggingface"; # repo = "transformers"; # rev = "2bd4d5897dc73e8b172832070a6f9e567a0df017"; # hash = "sha256-JOIpKH9ssDEfI2Tf15e0iPKtThJwQ9GxMvRAnm+M2Pg="; # }; # } # ); } ) ]; # Non-python package override example: # # ripgrep = prev.ripgrep.overrideAttrs ( # _: _: { # src = final.fetchFromGitHub { # owner = "BurntSushi"; # repo = "ripgrep"; # rev = "79cbe89deb1151e703f4d91b19af9cdcc128b765"; # hash = "sha256-JPTM2KNmGMb+/jOfK3X7OM1wnN+3TU35SJOIcqmp3mg="; # }; # }); }
text-generation-inference/nix/overlay.nix/0
{ "file_path": "text-generation-inference/nix/overlay.nix", "repo_id": "text-generation-inference", "token_count": 633 }
use crate::config::Config; use clap::ValueEnum; use csv::ReaderBuilder; use reqwest::header::HeaderMap; use serde::Serialize; use std::{ fs::File, io::{self, BufRead}, path::Path, process::Command, time::Duration, }; use uuid::Uuid; const TELEMETRY_URL: &str = "https://huggingface.co/api/telemetry/tgi"; #[derive(Copy, Clone, Debug, Serialize, ValueEnum)] pub enum UsageStatsLevel { On, NoStack, Off, } #[derive(Debug, Clone, Serialize)] pub struct UserAgent { pub uid: String, pub args: Args, pub env: Env, } impl UserAgent { pub fn new(reduced_args: Args) -> Self { Self { uid: Uuid::new_v4().to_string(), args: reduced_args, env: Env::new(), } } } #[derive(Serialize, Debug)] pub enum EventType { Start, Stop, Error, Ping, } #[derive(Debug, Serialize)] pub struct UsageStatsEvent { user_agent: UserAgent, event_type: EventType, #[serde(skip_serializing_if = "Option::is_none")] error_reason: Option<String>, } impl UsageStatsEvent { pub fn new(user_agent: UserAgent, event_type: EventType, error_reason: Option<String>) -> Self { Self { user_agent, event_type, error_reason, } } pub async fn send(&self) { let mut headers = HeaderMap::new(); headers.insert("Content-Type", "application/json".parse().unwrap()); let body = serde_json::to_string(&self).unwrap(); let client = reqwest::Client::new(); let _ = client .post(TELEMETRY_URL) .headers(headers) .body(body) .timeout(Duration::from_secs(10)) .send() .await; } } #[derive(Debug, Clone, Serialize)] pub struct Args { model_config: Option<Config>, tokenizer_class: Option<String>, max_concurrent_requests: usize, max_best_of: usize, max_stop_sequences: usize, max_top_n_tokens: u32, max_input_tokens: usize, max_total_tokens: usize, // waiting_served_ratio: f32, // max_batch_prefill_tokens: u32, // max_batch_total_tokens: Option<u32>, // max_waiting_tokens: usize, // max_batch_size: Option<usize>, revision: Option<String>, validation_workers: usize, disable_grammar_support: bool, max_client_batch_size: usize, usage_stats_level: UsageStatsLevel, backend_name: &'static str, } impl Args { #[allow(clippy::too_many_arguments)] pub fn new( model_config: Option<Config>, tokenizer_class: Option<String>, max_concurrent_requests: usize, max_best_of: usize, max_stop_sequences: usize, max_top_n_tokens: u32, max_input_tokens: usize, max_total_tokens: usize, // waiting_served_ratio: f32, // max_batch_prefill_tokens: u32, // max_batch_total_tokens: Option<u32>, // max_waiting_tokens: usize, // max_batch_size: Option<usize>, revision: Option<String>, validation_workers: usize, disable_grammar_support: bool, max_client_batch_size: usize, usage_stats_level: UsageStatsLevel, backend_name: &'static str, ) -> Self { Self { model_config, tokenizer_class, max_concurrent_requests, max_best_of, max_stop_sequences, max_top_n_tokens, max_input_tokens, max_total_tokens, // waiting_served_ratio, // max_batch_prefill_tokens, // max_batch_total_tokens, // max_waiting_tokens, // max_batch_size, revision, validation_workers, disable_grammar_support, max_client_batch_size, usage_stats_level, backend_name, } } } /// This is more or less a copy of the code from the `text-generation-launcher` crate to avoid a dependency #[derive(Serialize, Debug, Clone)] pub struct Env { git_sha: &'static str, docker_label: &'static str, nvidia_info: Option<Vec<NvidiaSmiInfo>>, xpu_info: Option<Vec<XpuSmiInfo>>, system_env: SystemInfo, } #[derive(Debug, Serialize, Clone)] struct NvidiaSmiInfo { name: String, pci_bus_id: String, driver_version: String, pstate: String, pcie_link_gen_max: String, 
pcie_link_gen_current: String, temperature_gpu: String, utilization_gpu: String, utilization_memory: String, memory_total: String, memory_free: String, memory_used: String, reset_status_reset_required: String, reset_status_drain_and_reset_recommended: String, compute_cap: String, ecc_errors_corrected_volatile_total: String, mig_mode_current: String, power_draw_instant: String, power_limit: String, } impl NvidiaSmiInfo { fn new() -> Option<Vec<NvidiaSmiInfo>> { let output = Command::new("nvidia-smi") .args([ "--query-gpu=name,pci.bus_id,driver_version,pstate,pcie.link.gen.max,pcie.link.gen.gpucurrent,temperature.gpu,utilization.gpu,utilization.memory,memory.total,memory.free,memory.used,reset_status.reset_required,reset_status.drain_and_reset_recommended,compute_cap,ecc.errors.corrected.volatile.total,mig.mode.current,power.draw.instant,power.limit", "--format=csv" ]) .output() .ok()?; if !output.status.success() { return None; } let stdout = String::from_utf8(output.stdout).ok()?; let mut rdr = ReaderBuilder::new() .has_headers(true) .from_reader(stdout.as_bytes()); let mut infos = Vec::new(); for result in rdr.records() { let record = result.ok()?; infos.push(NvidiaSmiInfo { name: record[0].to_string(), pci_bus_id: record[1].to_string(), driver_version: record[2].to_string(), pstate: record[3].to_string(), pcie_link_gen_max: record[4].to_string(), pcie_link_gen_current: record[5].to_string(), temperature_gpu: record[6].to_string(), utilization_gpu: record[7].to_string(), utilization_memory: record[8].to_string(), memory_total: record[9].to_string(), memory_free: record[10].to_string(), memory_used: record[11].to_string(), reset_status_reset_required: record[12].to_string(), reset_status_drain_and_reset_recommended: record[13].to_string(), compute_cap: record[14].to_string(), ecc_errors_corrected_volatile_total: record[15].to_string(), mig_mode_current: record[16].to_string(), power_draw_instant: record[17].to_string(), power_limit: record[18].to_string(), }); } Some(infos) } } #[derive(Debug, Serialize, Clone)] struct XpuSmiInfo { device_id: usize, gpu_utilization: f32, gpu_power: f32, gpu_core_temperature: f32, gpu_memory_bandwidth_utilization: f32, } impl XpuSmiInfo { /// based on this https://github.com/intel/xpumanager/blob/master/doc/smi_user_guide.md#dump-the-device-statistics-in-csv-format fn new() -> Option<Vec<XpuSmiInfo>> { let output = Command::new("xpu-smi") .args([ "dump", "-d", "-1", "-m", "0,1,3,17", // Metrics IDs: GPU Utilization, GPU Power, GPU Core Temperature, GPU Memory Bandwidth Utilization "-n", "1", "-j", ]) .output() .ok()?; if !output.status.success() { return None; } let stdout = String::from_utf8(output.stdout).ok()?; let mut infos = Vec::new(); let json_data: serde_json::Value = match serde_json::from_str(&stdout) { Ok(data) => data, Err(_) => return None, }; if let Some(metrics_data) = json_data.as_array() { for entry in metrics_data { let device_id = entry["deviceId"].as_u64()? as usize; let gpu_utilization = entry["metrics"][0].as_f64()? as f32; let gpu_power = entry["metrics"][1].as_f64()? as f32; let gpu_core_temperature = entry["metrics"][2].as_f64()? as f32; let gpu_memory_bandwidth_utilization = entry["metrics"][3].as_f64()? 
as f32; infos.push(XpuSmiInfo { device_id, gpu_utilization, gpu_power, gpu_core_temperature, gpu_memory_bandwidth_utilization, }); } } Some(infos) } } #[derive(Serialize, Debug, Clone)] pub struct SystemInfo { cpu_count: usize, cpu_type: String, total_memory: u64, architecture: String, platform: String, } impl SystemInfo { fn new() -> Self { let mut system = sysinfo::System::new_all(); system.refresh_all(); let cpu_count = system.cpus().len(); let cpu_type = system.cpus()[0].brand().to_string(); let total_memory = system.total_memory(); let architecture = std::env::consts::ARCH.to_string(); let platform = format!( "{}-{}-{}", std::env::consts::OS, std::env::consts::FAMILY, std::env::consts::ARCH ); Self { cpu_count, cpu_type, total_memory, architecture, platform, } } } impl Default for Env { fn default() -> Self { Self::new() } } impl Env { pub fn new() -> Self { Self { system_env: SystemInfo::new(), nvidia_info: NvidiaSmiInfo::new(), xpu_info: XpuSmiInfo::new(), git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("N/A"), docker_label: option_env!("DOCKER_LABEL").unwrap_or("N/A"), } } } pub fn is_container() -> io::Result<bool> { let path = Path::new("/proc/self/cgroup"); let file = File::open(path)?; let reader = io::BufReader::new(file); for line in reader.lines() { let line = line?; // Check for common container runtimes if line.contains("/docker/") || line.contains("/docker-") || line.contains("/kubepods/") || line.contains("/kubepods-") || line.contains("containerd") || line.contains("crio") || line.contains("podman") { return Ok(true); } } Ok(false) }
text-generation-inference/router/src/usage_stats.rs/0
{ "file_path": "text-generation-inference/router/src/usage_stats.rs", "repo_id": "text-generation-inference", "token_count": 5315 }
# Text Generation Inference Python gRPC Server A Python gRPC server for Text Generation Inference ## Install ```shell make install ``` ## Run ```shell make run-dev ```
text-generation-inference/server/README.md/0
{ "file_path": "text-generation-inference/server/README.md", "repo_id": "text-generation-inference", "token_count": 56 }
// Adapted from turboderp exllama: https://github.com/turboderp/exllama #ifndef _matrix_cuh #define _matrix_cuh #include <cuda_runtime.h> #include <cuda_fp16.h> class MatrixView_half { public: const half* data; const int height; const int width; __device__ __forceinline__ MatrixView_half(const half* data, const int height, const int width) : data(data), height(height), width(width) { } __device__ __forceinline__ half item(int row, int column) const { return data[row * width + column]; } __device__ __forceinline__ half2 item_half2(int row, int column) const { return ((half2*)data)[(row * width + column) / 2]; } __device__ __forceinline__ half2 item_half2half2(int row, int column) const { return __half2half2(data[row * width + column]); } __device__ __forceinline__ const half* item_ptr(int row, int column) const { return &data[row * width + column]; } }; class MatrixView_half_rw { public: half* data; const int height; const int width; __device__ __forceinline__ MatrixView_half_rw(half* data, const int height, const int width) : data(data), height(height), width(width) { } __device__ __forceinline__ half item(int row, int column) const { return data[row * width + column]; } __device__ __forceinline__ half2 item_half2(int row, int column) const { return ((half2*)data)[(row * width + column) / 2]; } __device__ __forceinline__ half* item_ptr(int row, int column) { return &data[row * width + column]; } __device__ __forceinline__ void set(int row, int column, half value) { data[row * width + column] = value; } __device__ __forceinline__ void set_half2(int row, int column, half2 value) { ((half2*)data)[(row * width + column) / 2] = value; } }; class MatrixView_q4_row { public: const uint32_t* data; const int height; const int width; __device__ __forceinline__ MatrixView_q4_row(const uint32_t* data, const int height, const int width) : data(data), height(height), width(width) { } __device__ __forceinline__ int item(int row, int column) const { int shift = (column & 0x07) * 4; return (data[row * width / 8 + column / 8] >> shift) & 0x0f; } }; class MatrixView_q4_column { public: const uint32_t* data; const int height; const int width; __device__ __forceinline__ MatrixView_q4_column(const uint32_t* data, const int height, const int width) : data(data), height(height), width(width) { } __device__ __forceinline__ int item(int row, int column) const { int shift = (row & 0x07) * 4; return (data[row / 8 * width + column] >> shift) & 0x0f; } __device__ __forceinline__ uint32_t item_uint32_t(int row, int column) { return data[row / 8 * width + column]; } __device__ __forceinline__ const uint32_t* item_uint32_ptr(int row, int column) { return &data[row / 8 * width + column]; } }; // TODO: Rewrite all these dot product functions using functors or something, move to q4_matmul.cu // Accumulated dot product of 8-element row vectors in h and quantized column vectors in v, constant zero/scale __device__ __forceinline__ half2 dot_product_8 ( const half2 acc, MatrixView_half& h_, const int h_row, const int h_column, // divisible by 8 MatrixView_q4_column& v_, const int v_row, // divisible by 8 const int v_column, const half2 v_scale_2, const uint32_t v_zero, // + 1 (!!) 
const int count ) { const half2* h_ptr = (const half2*) h_.item_ptr(h_row, h_column); const uint32_t* v_ptr = (const uint32_t*) v_.item_uint32_ptr(v_row, v_column); half2 result = acc; for (int i = 0; i < count; i++) { uint32_t v_read = *v_ptr; v_ptr += v_.width; half v_0 = __int2half_rn((int)((v_read ) & 0x0f) - v_zero); half v_1 = __int2half_rn((int)((v_read >> 4) & 0x0f) - v_zero); half v_2 = __int2half_rn((int)((v_read >> 8) & 0x0f) - v_zero); half v_3 = __int2half_rn((int)((v_read >> 12) & 0x0f) - v_zero); half v_4 = __int2half_rn((int)((v_read >> 16) & 0x0f) - v_zero); half v_5 = __int2half_rn((int)((v_read >> 20) & 0x0f) - v_zero); half v_6 = __int2half_rn((int)((v_read >> 24) & 0x0f) - v_zero); half v_7 = __int2half_rn((int)((v_read >> 28) ) - v_zero); half2 v_01 = __halves2half2(v_0, v_1); half2 v_23 = __halves2half2(v_2, v_3); half2 v_45 = __halves2half2(v_4, v_5); half2 v_67 = __halves2half2(v_6, v_7); // half2 v_01 = q4_table[v_zero - 1][(v_read ) & 0xff]; // (constant memory is too slow apparently) // half2 v_23 = q4_table[v_zero - 1][(v_read >> 8) & 0xff]; // half2 v_45 = q4_table[v_zero - 1][(v_read >> 16) & 0xff]; // half2 v_67 = q4_table[v_zero - 1][(v_read >> 24) ]; half2 tmp = __hmul2(*h_ptr++, v_01); tmp = __hfma2(*h_ptr++, v_23, tmp); tmp = __hfma2(*h_ptr++, v_45, tmp); tmp = __hfma2(*h_ptr++, v_67, tmp); result = __hfma2(v_scale_2, tmp, result); } return result; } __device__ __forceinline__ half dot_product_8_h ( const half acc, MatrixView_half& h_, const int h_row, const int h_column, // divisible by 8 MatrixView_q4_column& v_, const int v_row, // divisible by 8 const int v_column, const half v_scale, const uint32_t v_zero, // + 1 (!!) const int count ) { const half* h_ptr = h_.item_ptr(h_row, h_column); const uint32_t* v_ptr = (const uint32_t*) v_.item_uint32_ptr(v_row, v_column); half result = acc; for (int i = 0; i < count; i++) { uint32_t v_read = *v_ptr; v_ptr += v_.width; half v_0 = __int2half_rn((int)((v_read ) & 0x0f) - v_zero); half v_1 = __int2half_rn((int)((v_read >> 4) & 0x0f) - v_zero); half v_2 = __int2half_rn((int)((v_read >> 8) & 0x0f) - v_zero); half v_3 = __int2half_rn((int)((v_read >> 12) & 0x0f) - v_zero); half v_4 = __int2half_rn((int)((v_read >> 16) & 0x0f) - v_zero); half v_5 = __int2half_rn((int)((v_read >> 20) & 0x0f) - v_zero); half v_6 = __int2half_rn((int)((v_read >> 24) & 0x0f) - v_zero); half v_7 = __int2half_rn((int)((v_read >> 28) ) - v_zero); half tmp = __hmul(*h_ptr++, v_0); tmp = __hfma(*h_ptr++, v_1, tmp); tmp = __hfma(*h_ptr++, v_2, tmp); tmp = __hfma(*h_ptr++, v_3, tmp); tmp = __hfma(*h_ptr++, v_4, tmp); tmp = __hfma(*h_ptr++, v_5, tmp); tmp = __hfma(*h_ptr++, v_6, tmp); tmp = __hfma(*h_ptr++, v_7, tmp); result = __hfma(v_scale, tmp, result); } return result; } // Accumulated dot product of 8-element row vectors in h and quantized column vectors in v, constant zero/scale, with x_map __device__ __forceinline__ half2 dot_product_8_x_map ( const half2 acc, MatrixView_half& h_, const int h_row, const int h_column, // divisible by 8 MatrixView_q4_column& v_, const int v_row, // divisible by 8 const int v_column, const half2 v_scale_2, const uint32_t v_zero, // + 1 (!!) 
const int count, const uint32_t* x_map ) { const half* h_ptr = h_.item_ptr(h_row, 0); const uint32_t* x_map_ptr = x_map + h_column; const uint32_t* v_ptr = (const uint32_t*) v_.item_uint32_ptr(v_row, v_column); half2 result = acc; for (int i = 0; i < count; i++) { uint32_t v_read = *v_ptr; v_ptr += v_.width; half v_0 = __int2half_rn((int)((v_read ) & 0x0f) - v_zero); half v_1 = __int2half_rn((int)((v_read >> 4) & 0x0f) - v_zero); half v_2 = __int2half_rn((int)((v_read >> 8) & 0x0f) - v_zero); half v_3 = __int2half_rn((int)((v_read >> 12) & 0x0f) - v_zero); half v_4 = __int2half_rn((int)((v_read >> 16) & 0x0f) - v_zero); half v_5 = __int2half_rn((int)((v_read >> 20) & 0x0f) - v_zero); half v_6 = __int2half_rn((int)((v_read >> 24) & 0x0f) - v_zero); half v_7 = __int2half_rn((int)((v_read >> 28) ) - v_zero); half2 v_01 = __halves2half2(v_0, v_1); half2 v_23 = __halves2half2(v_2, v_3); half2 v_45 = __halves2half2(v_4, v_5); half2 v_67 = __halves2half2(v_6, v_7); half h_0 = h_ptr[*x_map_ptr++]; half h_1 = h_ptr[*x_map_ptr++]; half h_2 = h_ptr[*x_map_ptr++]; half h_3 = h_ptr[*x_map_ptr++]; half h_4 = h_ptr[*x_map_ptr++]; half h_5 = h_ptr[*x_map_ptr++]; half h_6 = h_ptr[*x_map_ptr++]; half h_7 = h_ptr[*x_map_ptr++]; half2 h_01 = __halves2half2(h_0, h_1); half2 h_23 = __halves2half2(h_2, h_3); half2 h_45 = __halves2half2(h_4, h_5); half2 h_67 = __halves2half2(h_6, h_7); half2 tmp = __hmul2(h_01, v_01); tmp = __hfma2(h_23, v_23, tmp); tmp = __hfma2(h_45, v_45, tmp); tmp = __hfma2(h_67, v_67, tmp); result = __hfma2(v_scale_2, tmp, result); } return result; } __device__ __forceinline__ half dot_product_8_x_map_h ( const half acc, MatrixView_half& h_, const int h_row, const int h_column, // divisible by 8 MatrixView_q4_column& v_, const int v_row, // divisible by 8 const int v_column, const half v_scale, const uint32_t v_zero, // + 1 (!!) const int count, const uint32_t* x_map ) { const half* h_ptr = h_.item_ptr(h_row, 0); const uint32_t* x_map_ptr = x_map + h_column; const uint32_t* v_ptr = (const uint32_t*) v_.item_uint32_ptr(v_row, v_column); half result = acc; for (int i = 0; i < count; i++) { uint32_t v_read = *v_ptr; v_ptr += v_.width; half v_0 = __int2half_rn((int)((v_read ) & 0x0f) - v_zero); half v_1 = __int2half_rn((int)((v_read >> 4) & 0x0f) - v_zero); half v_2 = __int2half_rn((int)((v_read >> 8) & 0x0f) - v_zero); half v_3 = __int2half_rn((int)((v_read >> 12) & 0x0f) - v_zero); half v_4 = __int2half_rn((int)((v_read >> 16) & 0x0f) - v_zero); half v_5 = __int2half_rn((int)((v_read >> 20) & 0x0f) - v_zero); half v_6 = __int2half_rn((int)((v_read >> 24) & 0x0f) - v_zero); half v_7 = __int2half_rn((int)((v_read >> 28) ) - v_zero); half tmp = __hmul(h_ptr[*x_map_ptr++], v_0); tmp = __hfma(h_ptr[*x_map_ptr++], v_1, tmp); tmp = __hfma(h_ptr[*x_map_ptr++], v_2, tmp); tmp = __hfma(h_ptr[*x_map_ptr++], v_3, tmp); tmp = __hfma(h_ptr[*x_map_ptr++], v_4, tmp); tmp = __hfma(h_ptr[*x_map_ptr++], v_5, tmp); tmp = __hfma(h_ptr[*x_map_ptr++], v_6, tmp); tmp = __hfma(h_ptr[*x_map_ptr++], v_7, tmp); result = __hfma(v_scale, tmp, result); } return result; } #endif
text-generation-inference/server/exllama_kernels/exllama_kernels/matrix.cuh/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/matrix.cuh", "repo_id": "text-generation-inference", "token_count": 5380 }
#ifndef _qdq_4_cuh #define _qdq_4_cuh #include "qdq_util.cuh" #include "../../config.h" #if QMODE_4BIT == 1 // Permutation: // // 77775555 33331111 66664444 22220000 __forceinline__ __device__ void shuffle_4bit_8 ( uint32_t* q, int stride ) { uint32_t qa = q[0]; uint32_t qb = 0; #pragma unroll for (int i = 0; i < 4; i++) { uint32_t qa0 = qa & 0x0f; uint32_t qa1 = (qa & 0xf0) >> 4; qa >>= 8; qb |= (qa1 << (i * 4 + 16)); qb |= (qa0 << (i * 4)); } q[0] = qb; } __forceinline__ __device__ void dequant_4bit_8 ( const uint32_t q_0, half2 (&dq)[4], int stride ) { const uint32_t c0 = 0x64006400; const half y16_ = __float2half_rn(1.0f / 16.0f); const half2 y16 = __halves2half2(y16_, y16_); const half z1_ = __float2half_rn(-1024.0f - 8.0f); const half z16_ = __float2half_rn(-1024.0f / 16.0f - 8.0f); const half2 z1 = __halves2half2(z1_, z1_); const half2 z16 = __halves2half2(z16_, z16_); uint32_t qa = q_0; half2_uint32 q0((qa & 0x000f000f) | c0); // half2(q[ 0], q[ 1]) + 1024 half2_uint32 q1((qa & 0x00f000f0) | c0); // half2(q[ 2], q[ 3]) * 16 + 1024 qa >>= 8; half2_uint32 q2((qa & 0x000f000f) | c0); // half2(q[ 4], q[ 5]) + 1024 half2_uint32 q3((qa & 0x00f000f0) | c0); // half2(q[ 6], q[ 7]) * 16 + 1024 dq[0] = __hadd2(q0.as_half2, z1); dq[1] = __hfma2(q1.as_half2, y16, z16); dq[2] = __hadd2(q2.as_half2, z1); dq[3] = __hfma2(q3.as_half2, y16, z16); } __forceinline__ __device__ void dequant_4bit_8_prep_zero_scale ( const uint32_t zero, const half scale, half2 (&z1z16)[2], half2 (&y1y16)[2] ) { half_uint16 z1(0xe400 | zero); // half(-1024.0f - zero); half z16 = __hsub(__int2half_rn(-64), __int2half_rn(zero)); half2 scale2 = __half2half2(scale); z1z16[0] = __hmul2(scale2, __half2half2(z1.as_half)); z1z16[1] = __hmul2(scale2, __half2half2(z16)); const half y1 = __float2half_rn(1.0f); const half y16 = __float2half_rn(1.0f / 16.0f); y1y16[0] = __hmul2(scale2, __half2half2(y1)); y1y16[1] = __hmul2(scale2, __half2half2(y16)); } __forceinline__ __device__ void dequant_4bit_8_prep_zero ( const uint32_t zero, half2(&z1z16)[2], half2(&y1y16)[2] ) { half_uint16 z1(0xe400 | zero); // half(-1024.0f - zero); half z16 = __hsub(__int2half_rn(-64), __int2half_rn(zero)); z1z16[0] = __half2half2(z1.as_half); z1z16[1] = __half2half2(z16); const half y1 = __float2half_rn(1.0f); const half y16 = __float2half_rn(1.0f / 16.0f); y1y16[0] = __half2half2(y1); y1y16[1] = __half2half2(y16); } __forceinline__ __device__ void dequant_4bit_8_gptq ( const uint32_t q_0, half2 (&dq)[4], half2 (&z1z16)[2], half2 (&y1y16)[2], int stride, bool scaled ) { const uint32_t c0 = 0x64006400; uint32_t qa = q_0; half2_uint32 q0((qa & 0x000f000f) | c0); // half2( q[0] + 1024, q[1] + 1024 ) half2_uint32 q1((qa & 0x00f000f0) | c0); // half2( q[2] * 16 + 1024, q[3] * 16 + 1024 ) qa >>= 8; half2_uint32 q2((qa & 0x000f000f) | c0); // half2( q[4] + 1024, q[5] + 1024 ) half2_uint32 q3((qa & 0x00f000f0) | c0); // half2( q[6] * 16 + 1024, q[7] * 16 + 1024 ) if (scaled) { dq[0] = __hfma2(q0.as_half2, y1y16[0], z1z16[0]); // half2( q[0] * s - z * s, q[1] * s - z * s) dq[1] = __hfma2(q1.as_half2, y1y16[1], z1z16[1]); // half2( q[2] * s - z * s, q[3] * s - z * s) dq[2] = __hfma2(q2.as_half2, y1y16[0], z1z16[0]); dq[3] = __hfma2(q3.as_half2, y1y16[1], z1z16[1]); } else { dq[0] = __hadd2(q0.as_half2, z1z16[0]); // half2( q[0] - z, q[1] - z ) dq[1] = __hfma2(q1.as_half2, y1y16[1], z1z16[1]); // half2( q[2] - z, q[3] - z ) dq[2] = __hadd2(q2.as_half2, z1z16[0]); // half2( q[4] - z, q[5] - z ) dq[3] = __hfma2(q3.as_half2, y1y16[1], z1z16[1]); // half2( q[6] - z, 
q[7] - z ) } } #else __forceinline__ __device__ void shuffle_4bit_8 ( uint32_t* q, int stride ) { } __forceinline__ __device__ void dequant_4bit_8 ( const uint32_t q_0, half2 (&dq)[4], int stride ) { half dqh[8]; for (int i = 0; i < 8; i++) dqh[i] = dq_ns(exb(q_0, i * 4, 0x0f), 8); for (int i = 0; i < 4; i++) dq[i] = __halves2half2(dqh[i * 2], dqh[i * 2 + 1]); } __forceinline__ __device__ void dequant_4bit_8_prep_zero_scale ( const uint32_t zero, const half scale, half2 (&z1)[2], half2 (&y1)[2] ) { half z = __int2half_rn(-((int)zero)); z = __hmul(z, scale); z1[0] = __half2half2(z); y1[0] = __half2half2(scale); } __forceinline__ __device__ void dequant_4bit_8_prep_zero ( const uint32_t zero, half2(&z1)[2], half2(&y1)[2] ) { half z = __int2half_rn(-((int)zero)); z1[0] = __half2half2(z); } __forceinline__ __device__ void dequant_4bit_8_gptq ( const uint32_t q_0, half2 (&dq)[4], half2 (&z1)[2], half2 (&y1)[2], int stride, bool scaled ) { half2 dqh2[8]; uint32_t qa = q_0; for (int i = 0; i < 4; i++) { half d0 = __int2half_rn(qa & 0x0f); qa >>= 4; half d1 = __int2half_rn(qa & 0x0f); qa >>= 4; dqh2[i] = __halves2half2(d0, d1); } if (scaled) { dq[0] = __hfma2(dqh2[0], y1[0], z1[0]); dq[1] = __hfma2(dqh2[1], y1[0], z1[0]); dq[2] = __hfma2(dqh2[2], y1[0], z1[0]); dq[3] = __hfma2(dqh2[3], y1[0], z1[0]); } else { dq[0] = __hadd2(dqh2[0], z1[0]); dq[1] = __hadd2(dqh2[1], z1[0]); dq[2] = __hadd2(dqh2[2], z1[0]); dq[3] = __hadd2(dqh2[3], z1[0]); } } #endif #endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_4.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_4.cuh", "repo_id": "text-generation-inference", "token_count": 3279 }
import pytest import torch from copy import copy from transformers import AutoTokenizer from text_generation_server.pb import generate_pb2 from text_generation_server.models.causal_lm import CausalLM, CausalLMBatch @pytest.fixture(scope="session") def default_causal_lm(): return CausalLM.fallback("gpt2") @pytest.fixture(scope="session") def gpt2_tokenizer(): tokenizer = AutoTokenizer.from_pretrained("gpt2", padding_side="left") tokenizer.pad_token_id = 50256 return tokenizer @pytest.fixture def default_pb_request(default_pb_parameters, default_pb_stop_parameters): return generate_pb2.Request( id=0, inputs="Test", input_chunks=generate_pb2.Input(chunks=[generate_pb2.InputChunk(text="Test")]), prefill_logprobs=True, truncate=100, parameters=default_pb_parameters, stopping_parameters=default_pb_stop_parameters, ) @pytest.fixture def default_pb_batch(default_pb_request): return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1) @pytest.fixture def default_causal_lm_batch(default_pb_batch, gpt2_tokenizer): return CausalLMBatch.from_pb( default_pb_batch, gpt2_tokenizer, torch.float32, torch.device("cpu") ) @pytest.fixture def default_multi_requests_causal_lm_batch(default_pb_request, gpt2_tokenizer): req_0 = copy(default_pb_request) req_0.id = 1 req_1 = default_pb_request req_1.id = 2 req_1.stopping_parameters.max_new_tokens = 5 batch_pb = generate_pb2.Batch(id=1, requests=[req_0, req_1], size=2) return CausalLMBatch.from_pb( batch_pb, gpt2_tokenizer, torch.float32, torch.device("cpu") ) def test_batch_from_pb(default_pb_batch, default_causal_lm_batch): batch = default_causal_lm_batch assert batch.batch_id == default_pb_batch.id assert batch.requests == default_pb_batch.requests assert len(batch.input_ids) == default_pb_batch.size assert batch.input_ids[0][-1] == 14402 assert torch.all(batch.input_ids[0][:-1] == 50256) assert batch.attention_mask[0, 0] == 1 assert torch.all(batch.attention_mask[0, 1:] == 0) assert batch.past_key_values is None assert all( [ torch.equal(input_ids, all_input_ids[:, 0]) for input_ids, all_input_ids in zip(batch.input_ids, batch.all_input_ids) ] ) assert batch.input_lengths == [1] assert len(batch) == default_pb_batch.size assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch) assert batch.max_input_length == batch.input_lengths[0] def test_batch_concatenate_no_prefill(default_causal_lm_batch): with pytest.raises(ValueError): CausalLMBatch.concatenate([default_causal_lm_batch, default_causal_lm_batch]) def test_causal_lm_batch_type(default_causal_lm): assert default_causal_lm.batch_type == CausalLMBatch def test_causal_lm_generate_token(default_causal_lm, default_causal_lm_batch): sequence_length = len(default_causal_lm_batch.all_input_ids[0]) generations, next_batch, _ = default_causal_lm.generate_token( default_causal_lm_batch ) assert len(generations) == len(next_batch) assert isinstance(next_batch, CausalLMBatch) assert len(next_batch.all_input_ids) == len(next_batch) assert len(next_batch.all_input_ids[0]) == sequence_length + 1 assert len(next_batch.attention_mask[0]) == 11 assert next_batch.all_input_ids[0][-1] == 13 assert next_batch.all_input_ids[0][-2] == 14402 assert torch.all(next_batch.all_input_ids[0][:-2] == 50256) assert torch.all(next_batch.attention_mask[0][0:2] == 1) assert torch.all(next_batch.attention_mask[0][2:] == 0) assert next_batch.input_ids.shape == (len(next_batch), 1) assert next_batch.input_ids[0, 0] == 13 assert next_batch.input_lengths == [2] assert next_batch.max_input_length == 
next_batch.input_lengths[0] assert next_batch.past_key_values is not None assert all( [p[0].shape == (1, 12, sequence_length, 64) for p in next_batch.past_key_values] ) assert all( [p[1].shape == (1, 12, sequence_length, 64) for p in next_batch.past_key_values] ) assert all([generation.generated_text is None for generation in generations]) assert all([len(generation.prefill_tokens) == 1 for generation in generations]) assert all( [ token_id.item() == 13 for generation in generations for token_id in generation.tokens.token_ids ] ) assert all( [ token_text == "." for generation in generations for token_text in generation.tokens.texts ] ) assert generations[0].request_id == 0 def test_causal_lm_generate_token_completion( default_causal_lm, default_causal_lm_batch ): next_batch = default_causal_lm_batch for _ in range(default_causal_lm_batch.stopping_criterias[0].max_new_tokens - 1): generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 assert generations[0].generated_text.text == ".java:784) at net.minecraft." assert generations[0].request_id == default_causal_lm_batch.requests[0].id assert ( generations[0].generated_text.generated_tokens == default_causal_lm_batch.stopping_criterias[0].max_new_tokens ) def test_causal_lm_generate_token_completion_multi( default_causal_lm, default_multi_requests_causal_lm_batch ): next_batch = default_multi_requests_causal_lm_batch for i in range( default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 1 ): generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is not None assert len(generations) == 2 assert generations[1].generated_text.text == ".java:784)" assert ( generations[1].request_id == default_multi_requests_causal_lm_batch.requests[1].id ) assert ( generations[1].generated_text.generated_tokens == default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens ) # Copy stopping_criterias before filtering stopping_criterias = ( default_multi_requests_causal_lm_batch.stopping_criterias.copy() ) next_batch = next_batch.filter([next_batch.requests[0].id]) for _ in range( stopping_criterias[0].max_new_tokens - stopping_criterias[1].max_new_tokens - 1 ): generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 assert generations[0].generated_text.text == ".java:784) at net.minecraft." 
assert ( generations[0].request_id == default_multi_requests_causal_lm_batch.requests[0].id ) assert ( generations[0].generated_text.generated_tokens == default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens ) def test_batch_concatenate( default_causal_lm, default_causal_lm_batch, default_multi_requests_causal_lm_batch ): next_batch_0 = default_causal_lm_batch _, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0) _, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0) next_batch_1 = default_multi_requests_causal_lm_batch _, next_batch_1, _ = default_causal_lm.generate_token(next_batch_1) # Clone past_key_values before concatenating to compare after, # because they are removed from the concatenated batches next_batch_0_past_key_values = [ (k.clone(), v.clone()) for (k, v) in next_batch_0.past_key_values ] next_batch_1_past_key_values = [ (k.clone(), v.clone()) for (k, v) in next_batch_1.past_key_values ] next_batch = CausalLMBatch.concatenate([next_batch_0, next_batch_1]) assert torch.equal(next_batch.all_input_ids[0], next_batch_0.all_input_ids[0]) assert torch.equal(next_batch.all_input_ids[1], next_batch_1.all_input_ids[0]) assert torch.equal(next_batch.all_input_ids[2], next_batch_1.all_input_ids[1]) assert torch.all( next_batch.attention_mask[0, : -next_batch.padding_right_offset] == 1 ) assert torch.all( next_batch.attention_mask[1:, 1 : -next_batch.padding_right_offset] == 1 ) assert torch.all(next_batch.attention_mask[1:, 3:] == 0) assert next_batch.batch_id == 0 assert next_batch.input_ids[0, 0] == 12355 assert torch.all(next_batch.input_ids[1:] == 13) assert next_batch.input_lengths == [3, 2, 2] assert next_batch.max_input_length == 3 assert next_batch.requests[0] == next_batch_0.requests[0] assert next_batch.requests[1:] == list(next_batch_1.requests) assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0] assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0] assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias assert next_batch.past_key_values is not None assert all([p[0].shape == (3, 12, 2, 64) for p in next_batch.past_key_values]) assert all([p[1].shape == (3, 12, 2, 64) for p in next_batch.past_key_values]) for i, past in enumerate(next_batch.past_key_values): assert torch.equal(next_batch_0_past_key_values[i][0][0, :, -2:], past[0][0]) assert torch.equal( next_batch_1_past_key_values[i][0][:, :, -1:], past[0][1:, :, -1:, :] ) assert torch.equal(next_batch_0_past_key_values[i][1][0, :, -2:], past[1][0]) assert torch.equal( next_batch_1_past_key_values[i][1][:, :, -1:], past[1][1:, :, -1:, :] ) for _ in range( default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 2 ): generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is not None assert len(generations) == 3 assert generations[2].generated_text.text == ".java:784)" assert ( generations[2].request_id == default_multi_requests_causal_lm_batch.requests[1].id ) assert ( generations[2].generated_text.generated_tokens == default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens ) next_batch = next_batch.filter( [next_batch.requests[0].id, next_batch.requests[1].id] ) for _ in range( default_causal_lm_batch.stopping_criterias[0].max_new_tokens - 
default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 2 ): generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is not None assert len(generations) == 2 assert generations[0].generated_text.text == ".java:784) at net.minecraft." assert generations[0].request_id == default_causal_lm_batch.requests[0].id assert ( generations[0].generated_text.generated_tokens == default_causal_lm_batch.stopping_criterias[0].max_new_tokens ) next_batch = next_batch.filter([next_batch.requests[1].id]) for _ in range( default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens - default_causal_lm_batch.stopping_criterias[0].max_new_tokens - default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 4 ): generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 assert generations[0].generated_text.text == ".java:784) at net.minecraft." assert ( generations[0].request_id == default_multi_requests_causal_lm_batch.requests[0].id ) assert ( generations[0].generated_text.generated_tokens == default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens )
text-generation-inference/server/tests/models/test_causal_lm.py/0
{ "file_path": "text-generation-inference/server/tests/models/test_causal_lm.py", "repo_id": "text-generation-inference", "token_count": 5390 }
import torch from typing import Dict, Optional, TypeVar from text_generation_server.models.types import Batch B = TypeVar("B", bound=Batch) class Cache: def __init__(self): self.cache: Dict[int, B] = {} def pop(self, batch_id: int) -> Optional[B]: return self.cache.pop(batch_id, None) def set(self, entry: B): if entry is not None: self.cache[entry.batch_id] = entry def delete(self, batch_id: int): batch = self.pop(batch_id) if batch is not None: del batch if torch.cuda.is_available(): torch.cuda.empty_cache() def clear(self): keys = list(self.cache.keys()) for k in keys: self.delete(k) def __len__(self): return len(self.cache.keys())
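
# --- Usage sketch: a minimal illustration of the Cache API defined above. ---
# The stand-in object below is hypothetical; the real server stores Batch
# subclasses such as CausalLMBatch, keyed by their `batch_id`.
if __name__ == "__main__":
    from types import SimpleNamespace

    cache = Cache()
    fake_batch = SimpleNamespace(batch_id=0)  # hypothetical stand-in for a real Batch
    cache.set(fake_batch)                     # stored under fake_batch.batch_id
    assert len(cache) == 1
    assert cache.pop(0) is fake_batch         # pop returns None for unknown ids
    cache.delete(0)                           # safe no-op: the entry was already popped
    cache.clear()                             # empties the cache, freeing CUDA memory if available
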
text-generation-inference/server/text_generation_server/cache.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/cache.py", "repo_id": "text-generation-inference", "token_count": 359 }
from dataclasses import dataclass import bitsandbytes as bnb import torch from bitsandbytes.nn import Int8Params, Params4bit from text_generation_server.utils.weights import UnquantizedWeight @dataclass class BNBWeight(UnquantizedWeight): weight: torch.Tensor def get_linear(self, bias: torch.Tensor): return Linear8bitLt(self.weight, bias, has_fp16_weights=False, threshold=6.0) class Linear8bitLt(torch.nn.Module): def __init__( self, weight, bias, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0, index=None, ): super().__init__() assert ( not memory_efficient_backward ), "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0" self.state = bnb.MatmulLtState() self.index = index # Necessary for stacked layers self.state.threshold = threshold self.state.has_fp16_weights = has_fp16_weights self.state.memory_efficient_backward = memory_efficient_backward if threshold > 0.0 and not has_fp16_weights: self.state.use_pool = True self.weight = Int8Params( weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights, ) self.weight.cuda(weight.device) self.bias = bias def init_8bit_state(self): self.state.CB = self.weight.CB self.state.SCB = self.weight.SCB self.weight.CB = None self.weight.SCB = None def forward(self, x: torch.Tensor): self.state.is_training = self.training if self.weight.CB is not None: self.init_8bit_state() # weights are cast automatically as Int8Params, but the bias has to be cast manually if self.bias is not None and self.bias.dtype != x.dtype: self.bias.data = self.bias.data.to(x.dtype) out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state) if not self.state.has_fp16_weights: if self.state.CB is not None and self.state.CxB is not None: # we converted 8-bit row major to turing/ampere format in the first inference pass # we no longer need the row-major weight del self.state.CB self.weight.data = self.state.CxB return out @dataclass class BNBFP4Weight(UnquantizedWeight): weight: torch.Tensor def get_linear(self, bias: torch.Tensor): return Linear4bit(self.weight, bias, quant_type="fp4") @dataclass class BNBNF4Weight(UnquantizedWeight): weight: torch.Tensor def get_linear(self, bias: torch.Tensor): return Linear4bit(self.weight, bias, quant_type="nf4") class Linear4bit(torch.nn.Module): def __init__(self, weight, bias, quant_type): super().__init__() self.weight = Params4bit( weight.data, requires_grad=False, compress_statistics=True, quant_type=quant_type, ) self.compute_dtype = None self.weight.cuda(weight.device) self.bias = bias def forward(self, x: torch.Tensor): # weights are cast automatically as Int8Params, but the bias has to be cast manually if self.bias is not None and self.bias.dtype != x.dtype: self.bias.data = self.bias.data.to(x.dtype) if getattr(self.weight, "quant_state", None) is None: print( "FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first." ) inp_dtype = x.dtype if self.compute_dtype is not None: x = x.to(self.compute_dtype) bias = None if self.bias is None else self.bias.to(self.compute_dtype) out = bnb.matmul_4bit( x, self.weight.t(), bias=bias, quant_state=self.weight.quant_state ) out = out.to(inp_dtype) return out
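
# --- Usage sketch: wrapping a plain fp16 weight with the 4-bit NF4 path above. ---
# The shapes, device and input below are assumptions made for illustration, not
# values from the original file; bitsandbytes needs a CUDA device, so the sketch
# is left commented out.
#
# weight = torch.randn(4096, 4096, dtype=torch.float16, device="cuda")
# bias = torch.zeros(4096, dtype=torch.float16, device="cuda")
# linear = BNBNF4Weight(weight).get_linear(bias)
# y = linear(torch.randn(1, 4096, dtype=torch.float16, device="cuda"))  # fp16 output, NF4-quantized weights
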
text-generation-inference/server/text_generation_server/layers/bnb.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/bnb.py", "repo_id": "text-generation-inference", "token_count": 1825 }
import time import torch.nn as nn import math import json import os import torch import transformers from texttable import Texttable from transformers import AutoModelForCausalLM, AutoConfig, AutoTokenizer from huggingface_hub import HfApi from accelerate import init_empty_weights from text_generation_server.utils import initialize_torch_distributed, Weights from text_generation_server.utils.hub import weight_files from text_generation_server.layers.gptq import QuantLinear from loguru import logger from typing import Optional from text_generation_server.layers.gptq.utils import torch_snr_error from text_generation_server.utils.weights import DefaultWeightsLoader, UnquantizedWeight DEV = torch.device("cuda:0") class Quantizer(nn.Module): def __init__(self, shape=1): super(Quantizer, self).__init__() self.register_buffer("maxq", torch.tensor(0)) self.register_buffer("scale", torch.zeros(shape)) self.register_buffer("zero", torch.zeros(shape)) def configure( self, bits, perchannel=False, sym=True, mse=False, norm=2.4, grid=100, maxshrink=0.8, trits=False, ): self.maxq = torch.tensor(2**bits - 1) self.perchannel = perchannel self.sym = sym self.mse = mse self.norm = norm self.grid = grid self.maxshrink = maxshrink if trits: self.maxq = torch.tensor(-1) self.scale = torch.zeros_like(self.scale) def _quantize(self, x, scale, zero, maxq): if maxq < 0: return (x > scale / 2).float() * scale + (x < zero / 2).float() * zero q = torch.clamp(torch.round(x / scale) + zero, 0, maxq) return scale * (q - zero) def find_params(self, x, weight=False): dev = x.device self.maxq = self.maxq.to(dev) shape = x.shape if self.perchannel: if weight: x = x.flatten(1) else: if len(shape) == 4: x = x.permute([1, 0, 2, 3]) x = x.flatten(1) if len(shape) == 3: x = x.reshape((-1, shape[-1])).t() if len(shape) == 2: x = x.t() else: x = x.flatten().unsqueeze(0) tmp = torch.zeros(x.shape[0], device=dev) xmin = torch.minimum(x.min(1)[0], tmp) xmax = torch.maximum(x.max(1)[0], tmp) if self.sym: xmax = torch.maximum(torch.abs(xmin), xmax) tmp = xmin < 0 if torch.any(tmp): xmin[tmp] = -xmax[tmp] tmp = (xmin == 0) & (xmax == 0) xmin[tmp] = -1 xmax[tmp] = +1 if self.maxq < 0: self.scale = xmax self.zero = xmin else: self.scale = (xmax - xmin) / self.maxq if self.sym: self.zero = torch.full_like(self.scale, (self.maxq + 1) / 2) else: self.zero = torch.round(-xmin / self.scale) if self.mse: best = torch.full([x.shape[0]], float("inf"), device=dev) for i in range(int(self.maxshrink * self.grid)): p = 1 - i / self.grid xmin1 = p * xmin xmax1 = p * xmax scale1 = (xmax1 - xmin1) / self.maxq zero1 = torch.round(-xmin1 / scale1) if not self.sym else self.zero q = self._quantize( x, scale1.unsqueeze(1), zero1.unsqueeze(1), self.maxq ) q -= x q.abs_() q.pow_(self.norm) err = torch.sum(q, 1) tmp = err < best if torch.any(tmp): best[tmp] = err[tmp] self.scale[tmp] = scale1[tmp] self.zero[tmp] = zero1[tmp] if not self.perchannel: if weight: tmp = shape[0] else: tmp = shape[1] if len(shape) != 3 else shape[2] self.scale = self.scale.repeat(tmp) self.zero = self.zero.repeat(tmp) if weight: shape = [-1] + [1] * (len(shape) - 1) self.scale = self.scale.reshape(shape) self.zero = self.zero.reshape(shape) return if len(shape) == 4: self.scale = self.scale.reshape((1, -1, 1, 1)) self.zero = self.zero.reshape((1, -1, 1, 1)) if len(shape) == 3: self.scale = self.scale.reshape((1, 1, -1)) self.zero = self.zero.reshape((1, 1, -1)) if len(shape) == 2: self.scale = self.scale.unsqueeze(0) self.zero = self.zero.unsqueeze(0) def quantize(self, x): if 
self.ready(): return self._quantize(x, self.scale, self.zero, self.maxq) return x def enabled(self): return self.maxq > 0 def ready(self): return torch.all(self.scale != 0) class GPTQ: def __init__(self, layer, observe=False): self.layer = layer self.dev = self.layer.weight.device W = layer.weight.data.clone() if isinstance(self.layer, nn.Conv2d): W = W.flatten(1) if isinstance(self.layer, transformers.Conv1D): W = W.t() self.rows = W.shape[0] self.columns = W.shape[1] self.H = torch.zeros((self.columns, self.columns), device=self.dev) self.nsamples = 0 self.quantizer = Quantizer() self.observe = observe def add_batch(self, inp, out): # Hessian H = 2 X XT + λ I if self.observe: self.inp1 = inp self.out1 = out else: self.inp1 = None self.out1 = None if len(inp.shape) == 2: inp = inp.unsqueeze(0) tmp = inp.shape[0] if isinstance(self.layer, nn.Linear) or isinstance( self.layer, transformers.Conv1D ): if len(inp.shape) == 3: inp = inp.reshape((-1, inp.shape[-1])) inp = inp.t() if isinstance(self.layer, nn.Conv2d): unfold = nn.Unfold( self.layer.kernel_size, dilation=self.layer.dilation, padding=self.layer.padding, stride=self.layer.stride, ) inp = unfold(inp) inp = inp.permute([1, 0, 2]) inp = inp.flatten(1) self.H *= self.nsamples / (self.nsamples + tmp) self.nsamples += tmp # inp = inp.float() inp = math.sqrt(2 / self.nsamples) * inp.float() # self.H += 2 / self.nsamples * inp.matmul(inp.t()) self.H += inp.matmul(inp.t()) def print_loss(self, name, q_weight, weight_error, timecost): table = Texttable() length = 28 name = ( (name + " " * (length - len(name))) if len(name) <= length else name[:length] ) table.header(["name", "weight_error", "fp_inp_SNR", "q_inp_SNR", "time"]) # assign weight self.layer.weight.data = q_weight.reshape(self.layer.weight.shape).to( self.layer.weight.data.dtype ) if self.inp1 is not None: # quantize input to int8 quantizer = Quantizer() quantizer.configure(8, perchannel=False, sym=True, mse=False) quantizer.find_params(self.inp1) q_in = quantizer.quantize(self.inp1).type(torch.float16) q_out = self.layer(q_in) # get kinds of SNR q_SNR = torch_snr_error(q_out, self.out1).item() fp_SNR = torch_snr_error(self.layer(self.inp1), self.out1).item() else: q_SNR = "-" fp_SNR = "-" table.add_row([name, weight_error, fp_SNR, q_SNR, timecost]) print(table.draw().split("\n")[-2]) def fasterquant( self, blocksize=128, percdamp=0.01, groupsize=-1, act_order=False, name="" ): self.layer.to(self.dev) W = self.layer.weight.data.clone() if isinstance(self.layer, nn.Conv2d): W = W.flatten(1) if isinstance(self.layer, transformers.Conv1D): W = W.t() W = W.float() tick = time.time() if not self.quantizer.ready(): self.quantizer.find_params(W, weight=True) H = self.H if not self.observe: del self.H dead = torch.diag(H) == 0 H[dead, dead] = 1 W[:, dead] = 0 if act_order: perm = torch.argsort(torch.diag(H), descending=True) W = W[:, perm] H = H[perm][:, perm] Losses = torch.zeros_like(W) Q = torch.zeros_like(W) damp = percdamp * torch.mean(torch.diag(H)) diag = torch.arange(self.columns, device=self.dev) H[diag, diag] += damp H = torch.linalg.cholesky(H) H = torch.cholesky_inverse(H) try: H = torch.linalg.cholesky(H, upper=True) except Exception: # Addition because Falcon fails on h_to_4h H = torch.linalg.cholesky( H + 1e-5 * torch.eye(H.shape[0]).to(H.device), upper=True ) Hinv = H g_idx = [] scale = [] zero = [] now_idx = 1 for i1 in range(0, self.columns, blocksize): i2 = min(i1 + blocksize, self.columns) count = i2 - i1 W1 = W[:, i1:i2].clone() Q1 = torch.zeros_like(W1) Err1 = 
torch.zeros_like(W1) Losses1 = torch.zeros_like(W1) Hinv1 = Hinv[i1:i2, i1:i2] for i in range(count): w = W1[:, i] d = Hinv1[i, i] if groupsize != -1: if (i1 + i) % groupsize == 0: self.quantizer.find_params( W[:, (i1 + i) : (i1 + i + groupsize)], weight=True ) if ((i1 + i) // groupsize) - now_idx == -1: scale.append(self.quantizer.scale) zero.append(self.quantizer.zero) now_idx += 1 q = self.quantizer.quantize(w.unsqueeze(1)).flatten() Q1[:, i] = q Losses1[:, i] = (w - q) ** 2 / d**2 err1 = (w - q) / d W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0)) Err1[:, i] = err1 Q[:, i1:i2] = Q1 Losses[:, i1:i2] = Losses1 / 2 W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:]) torch.cuda.synchronize() error = torch.sum(Losses).item() groupsize = groupsize if groupsize != -1 else self.columns g_idx = [i // groupsize for i in range(self.columns)] g_idx = torch.tensor(g_idx, dtype=torch.int32, device=Q.device) if act_order: invperm = torch.argsort(perm) Q = Q[:, invperm] g_idx = g_idx[invperm] if isinstance(self.layer, transformers.Conv1D): Q = Q.t() self.print_loss( name=name, q_weight=Q, weight_error=error, timecost=(time.time() - tick) ) if scale == []: scale.append(self.quantizer.scale) zero.append(self.quantizer.zero) scale = torch.cat(scale, dim=1) zero = torch.cat(zero, dim=1) return scale, zero, g_idx, error def free(self): self.inp1 = None self.out1 = None self.H = None self.Losses = None self.Trace = None torch.cuda.empty_cache() def get_wikitext2(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset("wikitext", "wikitext-2-raw-v1", split="train") testdata = load_dataset("wikitext", "wikitext-2-raw-v1", split="test") try: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=False, trust_remote_code=trust_remote_code ) except Exception: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=True, trust_remote_code=trust_remote_code ) trainenc = tokenizer("\n\n".join(traindata["text"]), return_tensors="pt") testenc = tokenizer("\n\n".join(testdata["text"]), return_tensors="pt") import random random.seed(seed) trainloader = [] for _ in range(nsamples): i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) return trainloader, testenc def get_ptb(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset("ptb_text_only", "penn_treebank", split="train") valdata = load_dataset("ptb_text_only", "penn_treebank", split="validation") try: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=False, trust_remote_code=trust_remote_code ) except Exception: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=True, trust_remote_code=trust_remote_code ) trainenc = tokenizer("\n\n".join(traindata["sentence"]), return_tensors="pt") testenc = tokenizer("\n\n".join(valdata["sentence"]), return_tensors="pt") import random random.seed(seed) trainloader = [] for _ in range(nsamples): i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) return trainloader, testenc def get_c4(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset( "allenai/c4", "allenai--c4", data_files={"train": "en/c4-train.00000-of-01024.json.gz"}, split="train", use_auth_token=False, ) 
valdata = load_dataset( "allenai/c4", "allenai--c4", data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"}, split="validation", use_auth_token=False, ) try: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=False, trust_remote_code=trust_remote_code ) except Exception: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=True, trust_remote_code=trust_remote_code ) import random random.seed(seed) trainloader = [] for _ in range(nsamples): while True: i = random.randint(0, len(traindata) - 1) trainenc = tokenizer(traindata[i]["text"], return_tensors="pt") if trainenc.input_ids.shape[1] >= seqlen: break i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) import random random.seed(0) valenc = [] for _ in range(256): while True: i = random.randint(0, len(valdata) - 1) tmp = tokenizer(valdata[i]["text"], return_tensors="pt") if tmp.input_ids.shape[1] >= seqlen: break i = random.randint(0, tmp.input_ids.shape[1] - seqlen - 1) j = i + seqlen valenc.append(tmp.input_ids[:, i:j]) valenc = torch.hstack(valenc) class TokenizerWrapper: def __init__(self, input_ids): self.input_ids = input_ids valenc = TokenizerWrapper(valenc) return trainloader, valenc def get_ptb_new(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset("ptb_text_only", "penn_treebank", split="train") testdata = load_dataset("ptb_text_only", "penn_treebank", split="test") try: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=False, trust_remote_code=trust_remote_code ) except Exception: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=True, trust_remote_code=trust_remote_code ) trainenc = tokenizer(" ".join(traindata["sentence"]), return_tensors="pt") testenc = tokenizer(" ".join(testdata["sentence"]), return_tensors="pt") import random random.seed(seed) trainloader = [] for _ in range(nsamples): i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) return trainloader, testenc def get_c4_new(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset( "allenai/c4", "allenai--c4", data_files={"train": "en/c4-train.00000-of-01024.json.gz"}, split="train", ) valdata = load_dataset( "allenai/c4", "allenai--c4", data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"}, split="validation", ) try: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=False, trust_remote_code=trust_remote_code ) except Exception: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=True, trust_remote_code=trust_remote_code ) import random random.seed(seed) trainloader = [] for _ in range(nsamples): while True: i = random.randint(0, len(traindata) - 1) trainenc = tokenizer(traindata[i]["text"], return_tensors="pt") if trainenc.input_ids.shape[1] >= seqlen: break i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) valenc = tokenizer(" ".join(valdata[:1100]["text"]), return_tensors="pt") valenc = valenc.input_ids[:, : (256 * seqlen)] class TokenizerWrapper: def __init__(self, input_ids): self.input_ids = input_ids valenc = TokenizerWrapper(valenc) return trainloader, valenc def 
get_loaders( name, nsamples=128, seed=0, seqlen=2048, model_id="", trust_remote_code=False ): if "wikitext2" in name: return get_wikitext2(nsamples, seed, seqlen, model_id, trust_remote_code) if "ptb" in name: if "new" in name: return get_ptb_new(nsamples, seed, seqlen, model_id, trust_remote_code) return get_ptb(nsamples, seed, seqlen, model_id, trust_remote_code) if "c4" in name: if "new" in name: return get_c4_new(nsamples, seed, seqlen, model_id, trust_remote_code) return get_c4(nsamples, seed, seqlen, model_id, trust_remote_code) def find_layers(module, layers=(nn.Conv2d, nn.Linear), name=""): # Skip last lm_head linear # Need isintance Falcon is inheriting Linear. if isinstance(module, layers) and "lm_head" not in name: return {name: module} res = {} for name1, child in module.named_children(): res.update( find_layers( child, layers=layers, name=name + "." + name1 if name != "" else name1 ) ) return res @torch.no_grad() def sequential( model, dataloader, dev, nsamples, bits, groupsize, *, hooks, percdamp=0.01, sym: bool = False, act_order: bool = False, ): print("Starting ...") use_cache = model.config.use_cache model.config.use_cache = False try: layers = model.model.layers prefix = "model.layers" except Exception: layers = model.transformer.h prefix = "transformer.h" dtype = next(iter(model.parameters())).dtype inps = torch.zeros( (nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev ) cache = {"i": 0} extra = {} class Catcher(nn.Module): def __init__(self, module): super().__init__() self.module = module def forward(self, inp, **kwargs): inps[cache["i"]] = inp cache["i"] += 1 extra.update(kwargs.copy()) raise ValueError layers[0] = Catcher(layers[0]) for batch in dataloader: try: model(batch[0].cuda()) except ValueError: pass layers[0] = layers[0].module # layers[0] = layers[0].cpu() # model.model.embed_tokens = model.model.embed_tokens.cpu() # model.model.norm = model.model.norm.cpu() torch.cuda.empty_cache() for hook in hooks: hook.remove() outs = torch.zeros_like(inps) extra = { k: v.to(dev) if isinstance(v, torch.Tensor) else v for k, v in extra.items() } print("Ready.") quantizers = {} for i in range(len(layers)): print(f"Quantizing layer {i+1}/{len(layers)}..") print("+------------------+--------------+------------+-----------+-------+") print("| name | weight_error | fp_inp_SNR | q_inp_SNR | time |") print("+==================+==============+============+===========+=======+") layer = layers[i] layer.load() full = find_layers(layer) sequential = [list(full.keys())] for names in sequential: subset = {n: full[n] for n in names} gptq = {} for name in subset: gptq[name] = GPTQ(subset[name]) gptq[name].quantizer.configure( bits, perchannel=True, sym=sym, mse=False ) pass def add_batch(name): nonlocal gptq def tmp(_, inp, out): gptq[name].add_batch(inp[0].data, out.data) return tmp handles = [] for name in subset: handles.append(subset[name].register_forward_hook(add_batch(name))) for j in range(nsamples): outs[j] = layer(inps[j].unsqueeze(0), **extra)[0] for h in handles: h.remove() for name in subset: scale, zero, g_idx, error = gptq[name].fasterquant( percdamp=percdamp, groupsize=groupsize, act_order=act_order, name=name, ) quantizers[f"{prefix}.{i}.{name}"] = ( gptq[name].quantizer.cpu(), scale.cpu(), zero.cpu(), g_idx.cpu(), bits, groupsize, ) gptq[name].free() for j in range(nsamples): outs[j] = layer(inps[j].unsqueeze(0), **extra)[0] layer.unload() del layer del gptq torch.cuda.empty_cache() inps, outs = outs, inps 
print("+------------------+--------------+------------+-----------+-------+") print("\n") model.config.use_cache = use_cache return quantizers def make_quant_linear(module, names, bits, groupsize, name=""): if isinstance(module, QuantLinear): return for attr in dir(module): tmp = getattr(module, attr) name1 = name + "." + attr if name != "" else attr if name1 in names: delattr(module, attr) setattr( module, attr, QuantLinear.new( bits, groupsize, tmp.in_features, tmp.out_features, tmp.bias is not None, ), ) for name1, child in module.named_children(): make_quant_linear( child, names, bits, groupsize, name + "." + name1 if name != "" else name1 ) # TODO: perform packing on GPU def pack(model, quantizers, bits, groupsize): layers = find_layers(model) layers = {n: layers[n] for n in quantizers} make_quant_linear(model, quantizers, bits, groupsize) qlayers = find_layers(model, (QuantLinear,)) print("Packing ...") for name in qlayers: print(name) quantizers[name], scale, zero, g_idx, _, _ = quantizers[name] qlayers[name].pack(layers[name], scale, zero, g_idx) print("Done.") return model def setdeepattr(module, full_name, tensor): current = module tokens = full_name.split(".") for token in tokens[:-1]: current = getattr(current, token) setattr(current, tokens[-1], tensor) def getdeepattr(module, full_name): current = module tokens = full_name.split(".") for token in tokens: current = getattr(current, token) return current def load_weights_pre_hook(module_name, weights, recursive=False): def inner(module, args): print(f"Pre hook {module_name}") local_params = {} for k, v in module.named_parameters(): if not recursive and k.count(".") != 1: continue local_params[k] = v for k, v in module.named_buffers(): if not recursive and k.count(".") != 1: continue local_params[k] = v for local_param in local_params: current_tensor = getdeepattr(module, local_param) if current_tensor.device == torch.device("meta"): # print(f"Loading {local_param}") if module_name: tensor_name = f"{module_name}.{local_param}" else: tensor_name = local_param tensor = weights.get_tensor(tensor_name) setdeepattr(module, local_param, nn.Parameter(tensor)) else: tensor = current_tensor.to(device=torch.device("cuda:0")) if current_tensor.requires_grad: tensor = nn.Parameter(tensor) setdeepattr(module, local_param, tensor) return inner def load_weights_post_hook(module_name, weights, recursive=False): def inner(module, args, output): print(f"Post hook {module_name}") local_params = {} for k, v in module.named_parameters(): if not recursive and k.count(".") != 1: continue local_params[k] = v for k, v in module.named_buffers(): if not recursive and k.count(".") != 1: continue local_params[k] = v for local_param in local_params: # print(f"Unloading {local_param}") current_tensor = getdeepattr(module, local_param) setdeepattr( module, local_param, nn.Parameter(current_tensor.to(device=torch.device("cpu"))), ) return output return inner def quantize( model_id: str, bits: int, groupsize: int, output_dir: str, revision: str, trust_remote_code: bool, upload_to_model_id: Optional[str], percdamp: float, act_order: bool, sym: bool, ): print("loading model") config = AutoConfig.from_pretrained( model_id, trust_remote_code=trust_remote_code, ) with init_empty_weights(): model = AutoModelForCausalLM.from_config( config, torch_dtype=torch.float16, trust_remote_code=trust_remote_code ) model = model.eval() print("LOADED model") files = weight_files(model_id, revision, extension=".safetensors") process_group, _, _ = initialize_torch_distributed() 
weights = Weights( files, device=torch.device("cuda:0"), dtype=torch.float16, process_group=process_group, aliases={"embed_tokens.weight": ["lm_head.weight"]}, weights_loader=DefaultWeightsLoader(UnquantizedWeight), ) hooks = [] for name, module in model.named_modules(): def load(module, name): def _load(): load_weights_pre_hook(name, weights, recursive=True)(module, None) return _load def unload(module, name): def _unload(): load_weights_post_hook(name, weights, recursive=True)( module, None, None ) return _unload module.load = load(module, name) module.unload = unload(module, name) hooks.append( module.register_forward_pre_hook(load_weights_pre_hook(name, weights)) ) hooks.append( module.register_forward_hook(load_weights_post_hook(name, weights)) ) model.seqlen = 2048 dataset = "wikitext2" nsamples = 128 seed = None dataloader, testloader = get_loaders( dataset, nsamples=nsamples, seed=seed, model_id=model_id, seqlen=model.seqlen, trust_remote_code=trust_remote_code, ) tick = time.time() quantizers = sequential( model, dataloader, DEV, nsamples, bits, groupsize, percdamp=percdamp, act_order=act_order, hooks=hooks, sym=sym, ) print(time.time() - tick) pack(model, quantizers, bits, groupsize) from safetensors.torch import save_file from huggingface_hub import split_torch_state_dict_into_shards state_dict = model.state_dict() state_dict = {k: v.cpu().contiguous() for k, v in state_dict.items()} max_shard_size = "10GB" state_dict_split = split_torch_state_dict_into_shards( state_dict, filename_pattern="model.safetensors", max_shard_size=max_shard_size, ) index = None if state_dict_split.is_sharded: index = { "metadata": state_dict_split.metadata, "weight_map": state_dict_split.tensor_to_filename, } shards = state_dict_split.filename_to_tensors os.makedirs(output_dir, exist_ok=True) for shard_file, shard in shards.items(): save_file( shard, os.path.join(output_dir, shard_file), metadata={ "format": "pt", "quantized": "gptq", "origin": "text-generation-inference", }, ) if index is None: path_to_weights = os.path.join(output_dir, "model.safetensors") logger.info(f"Model weights saved in {path_to_weights}") else: save_index_file = "model.safetensors.index.json" save_index_file = os.path.join(output_dir, save_index_file) with open(save_index_file, "w", encoding="utf-8") as f: content = json.dumps(index, indent=2, sort_keys=True) + "\n" f.write(content) logger.info( f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be " f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the " f"index located at {save_index_file}." ) config = AutoConfig.from_pretrained(model_id, trust_remote_code=trust_remote_code) config.quantization_config = { "bits": bits, "group_size": groupsize, "damp_percent": percdamp, "desc_act": act_order, "static_groups": False, "sym": sym, "quant_method": "gptq", } config.save_pretrained(output_dir) logger.info("Saved config") logger.info("Saving tokenizer") tokenizer = AutoTokenizer.from_pretrained( model_id, trust_remote_code=trust_remote_code ) tokenizer.save_pretrained(output_dir) logger.info("Saved tokenizer") if upload_to_model_id: api = HfApi() api.upload_folder( folder_path=output_dir, repo_id=upload_to_model_id, repo_type="model" )
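
# --- Usage sketch: how the `quantize` entry point above is typically invoked. ---
# The model id and output directory are placeholder values chosen for illustration,
# not taken from the original file; in TGI this function is normally driven from the
# server CLI rather than called directly.
#
# quantize(
#     model_id="bigscience/bloom-560m",
#     bits=4,
#     groupsize=128,
#     output_dir="/tmp/bloom-560m-gptq",
#     revision="main",
#     trust_remote_code=False,
#     upload_to_model_id=None,
#     percdamp=0.01,
#     act_order=False,
#     sym=False,
# )
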
text-generation-inference/server/text_generation_server/layers/gptq/quantize.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/gptq/quantize.py", "repo_id": "text-generation-inference", "token_count": 16305 }
from dataclasses import dataclass from typing import List, Optional import torch import torch.nn as nn from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.utils.weights import Weights from text_generation_server.layers.marlin.gptq import ( GPTQMarlinWeight, GPTQMarlinWeightsLoader, ) if SYSTEM == "cuda": from moe_kernels.fused_marlin_moe import fused_marlin_moe else: fused_marlin_moe = None try: major, _minor = torch.cuda.get_device_capability() has_sm_8_0 = major >= 8 except Exception: has_sm_8_0 = False def can_use_marlin_moe_gemm( *, quant_method: str, quantize: str, sym: bool, ): return ( SYSTEM == "cuda" and fused_marlin_moe is not None and has_sm_8_0 and quantize in {"awq", "gptq"} and quant_method in {"awq", "gptq"} # We only support asymmetric quantization for AWQ. and (sym or quant_method == "awq") ) @dataclass class GPTQMarlinMoEWeight: qweight: torch.Tensor qzeros: torch.Tensor scales: torch.Tensor g_idx: torch.Tensor perm: torch.Tensor is_full_k: bool class GPTQMarlinSparseMoELayer(nn.Module): """ MoE layer that uses a fused GPTQ-Marlin kernel. """ def __init__( self, *, n_expert_group: Optional[int], n_experts: int, prefix: str, renormalize: bool, topk: int, topk_group: Optional[int], weights: Weights, gate_proj_name: str = "gate_proj", up_proj_name: str = "up_proj", down_proj_name: str = "down_proj", scoring_func: Optional[str] = None, e_score_correction_bias: Optional[float] = None, ): assert scoring_func == "softmax", f"scoring func {scoring_func} is not handled" assert e_score_correction_bias is None, "scoring correction bias is not handled" super().__init__() if not ( isinstance(weights.loader, GPTQMarlinWeightsLoader) and can_use_marlin_moe_gemm( quant_method=weights.loader.quant_method, quantize=weights.loader.quantize, sym=weights.loader.sym, ) ): raise ValueError( f"Unsupported weights loader: {type(weights.loader)}, only GPTQMarlinWeightsLoader with AWQ and symmetric GPTQ quantization is supported" ) assert (n_expert_group is None) == ( topk_group is None ), "n_expert_group and topk_group must both be None or have some value" self.n_expert_group = n_expert_group self.topk = topk self.topk_group = topk_group self.renormalize = renormalize self.gate_up_proj = _load_expert_multi_weights_col( prefix=prefix, n_experts=n_experts, names=[gate_proj_name, up_proj_name], weights=weights, ) self.down_proj = _load_expert_weights_row( prefix=prefix, n_experts=n_experts, name=down_proj_name, weights=weights ) self.bits = weights.loader.bits def forward(self, x: torch.Tensor, *, gating_output: torch.Tensor) -> torch.Tensor: return fused_marlin_moe( hidden_states=x, w1=self.gate_up_proj.qweight, w2=self.down_proj.qweight, w1_scale=self.gate_up_proj.scales, w2_scale=self.down_proj.scales, w1_zeros=( self.gate_up_proj.qzeros if self.gate_up_proj.qzeros.numel() > 0 else None ), w2_zeros=( self.down_proj.qzeros if self.down_proj.qzeros.numel() > 0 else None ), g_idx1=self.gate_up_proj.g_idx, g_idx2=self.down_proj.g_idx, sort_indices1=self.gate_up_proj.perm, sort_indices2=self.down_proj.perm, is_k_full=self.gate_up_proj.is_full_k or self.down_proj.is_full_k, gating_output=gating_output, topk=self.topk, renormalize=self.renormalize, use_grouped_topk=self.n_expert_group is not None, num_expert_group=self.n_expert_group, topk_group=self.topk_group, num_bits=self.bits, ) def _load_expert_multi_weights_col( *, prefix: str, n_experts: int, names: List[str], weights: Weights, ) -> GPTQMarlinMoEWeight: moe_weight = None for i in range(n_experts): weight = 
weights.get_multi_weights_col( [f"{prefix}.{i}.{name}" for name in names], 0 ) assert isinstance(weight, GPTQMarlinWeight) moe_weight = _pack_weight( n_experts=n_experts, expert=i, weight=weight, moe_weight=moe_weight ) assert moe_weight is not None return moe_weight def _load_expert_weights_row( *, prefix: str, n_experts: int, name: str, weights: Weights, ) -> GPTQMarlinMoEWeight: moe_weight = None for i in range(n_experts): weight = weights.get_weights_row( f"{prefix}.{i}.{name}", ) assert isinstance(weight, GPTQMarlinWeight) moe_weight = _pack_weight( n_experts=n_experts, expert=i, weight=weight, moe_weight=moe_weight ) assert moe_weight is not None return moe_weight def _pack_weight( *, n_experts: int, expert: int, moe_weight: Optional[GPTQMarlinMoEWeight], weight: GPTQMarlinWeight, ) -> GPTQMarlinMoEWeight: if moe_weight is None: qweight = torch.empty( (n_experts,) + weight.qweight.shape, dtype=weight.qweight.dtype, device=weight.qweight.device, ) qzeros = torch.empty( (n_experts,) + weight.qzeros.shape, dtype=weight.qzeros.dtype, device=weight.qzeros.device, ) scales = torch.empty( (n_experts,) + weight.scales.shape, dtype=weight.scales.dtype, device=weight.scales.device, ) g_idx = torch.empty( (n_experts,) + weight.g_idx.shape, dtype=weight.g_idx.dtype, device=weight.g_idx.device, ) perm = torch.empty( (n_experts,) + weight.perm.shape, dtype=weight.perm.dtype, device=weight.perm.device, ) moe_weight = GPTQMarlinMoEWeight( qweight=qweight, qzeros=qzeros, scales=scales, g_idx=g_idx, perm=perm, is_full_k=weight.is_full_k, ) moe_weight.qweight[expert] = weight.qweight moe_weight.qzeros[expert] = weight.qzeros moe_weight.scales[expert] = weight.scales moe_weight.g_idx[expert] = weight.g_idx moe_weight.perm[expert] = weight.perm return moe_weight
text-generation-inference/server/text_generation_server/layers/moe/gptq_marlin.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/moe/gptq_marlin.py", "repo_id": "text-generation-inference", "token_count": 3509 }
def load_text_model(prefix, config, weights, name=None): if config.model_type == "llama": from text_generation_server.models.custom_modeling.flash_llama_modeling import ( FlashLlamaForCausalLM, ) return FlashLlamaForCausalLM(prefix, config, weights, name=name) elif config.model_type == "mistral": from text_generation_server.models.custom_modeling.flash_mistral_modeling import ( FlashMistralForCausalLM, ) return FlashMistralForCausalLM(prefix, config, weights, name=name) elif config.model_type == "gemma": from text_generation_server.models.custom_modeling.flash_gemma_modeling import ( FlashGemmaForCausalLM, ) return FlashGemmaForCausalLM(prefix, config, weights, causal=False) elif config.model_type == "gemma2": from text_generation_server.models.custom_modeling.flash_gemma2_modeling import ( FlashGemma2ForCausalLM, ) return FlashGemma2ForCausalLM(prefix, config, weights) elif config.model_type == "paligemma": from text_generation_server.models.custom_modeling.flash_gemma_modeling import ( FlashGemmaForCausalLM, ) return FlashGemmaForCausalLM(prefix, config, weights) else: raise RuntimeError(f"Unsupported model type {config.model_type}") def load_vision_model(prefix, config, weights): if config.model_type == "clip_vision_model": from text_generation_server.models.custom_modeling.clip import ( CLIPVisionTransformer, ) return CLIPVisionTransformer( prefix=f"{prefix}.vision_model", config=config, weights=weights ) if config.model_type == "siglip_vision_model": from text_generation_server.models.custom_modeling.siglip import ( SiglipVisionTransformer, ) return SiglipVisionTransformer( prefix="vision_tower.vision_model", config=config, weights=weights ) else: raise RuntimeError(f"Unsupported model type {config.model_type}")
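
# --- Usage sketch: both helpers above dispatch on `config.model_type`. ---
# The prefixes and the `config`/`weights` objects below are assumptions for
# illustration; real callers (the VLM modeling code) pass their own prefixes and a
# loaded Weights object.
#
# text_model = load_text_model("language_model", config.text_config, weights)
# vision_tower = load_vision_model("vision_tower", config.vision_config, weights)
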
text-generation-inference/server/text_generation_server/models/custom_modeling/vlm.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/vlm.py", "repo_id": "text-generation-inference", "token_count": 868 }
import grpc from opentelemetry import trace from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter from opentelemetry.instrumentation.grpc._aio_server import ( OpenTelemetryAioServerInterceptor, ) from opentelemetry.semconv.trace import SpanAttributes from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( BatchSpanProcessor, ) class UDSOpenTelemetryAioServerInterceptor(OpenTelemetryAioServerInterceptor): def __init__(self): super().__init__(trace.get_tracer(__name__)) def _start_span(self, handler_call_details, context, set_status_on_exception=False): """ Rewrite _start_span method to support Unix Domain Socket gRPC contexts """ # standard attributes attributes = { SpanAttributes.RPC_SYSTEM: "grpc", SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.OK.value[0], } # if we have details about the call, split into service and method if handler_call_details.method: service, method = handler_call_details.method.lstrip("/").split("/", 1) attributes.update( { SpanAttributes.RPC_METHOD: method, SpanAttributes.RPC_SERVICE: service, } ) # add some attributes from the metadata metadata = dict(context.invocation_metadata()) if "user-agent" in metadata: attributes["rpc.user_agent"] = metadata["user-agent"] # We use gRPC over a UNIX socket attributes.update({SpanAttributes.NET_TRANSPORT: "unix"}) return self._tracer.start_as_current_span( name=handler_call_details.method, kind=trace.SpanKind.SERVER, attributes=attributes, set_status_on_exception=set_status_on_exception, ) def setup_tracing(otlp_service_name: str, otlp_endpoint: str): resource = Resource.create(attributes={"service.name": otlp_service_name}) span_exporter = OTLPSpanExporter(endpoint=otlp_endpoint, insecure=True) span_processor = BatchSpanProcessor(span_exporter) trace.set_tracer_provider(TracerProvider(resource=resource)) trace.get_tracer_provider().add_span_processor(span_processor)
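
# --- Usage sketch: wiring the interceptor above into a gRPC asyncio server. ---
# The service name and OTLP endpoint are placeholder values for illustration.
#
# from grpc import aio
#
# setup_tracing(otlp_service_name="text-generation-server", otlp_endpoint="localhost:4317")
# server = aio.server(interceptors=[UDSOpenTelemetryAioServerInterceptor()])
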
text-generation-inference/server/text_generation_server/tracing.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/tracing.py", "repo_id": "text-generation-inference", "token_count": 969 }
# Origin: https://github.com/predibase/lorax # Path: lorax/server/lorax_server/utils/sgmv.py # License: Apache License Version 2.0, January 2004 import os import warnings from functools import lru_cache from typing import List, Tuple import torch import torch.nn.functional as F try: import punica_kernels as _kernels HAS_SGMV = not bool(os.environ.get("DISABLE_SGMV", "")) except ImportError: warnings.warn("Could not import SGMV kernel from Punica, falling back to loop.") _kernels = None HAS_SGMV = False MIN_SGMV_RANK = 8 MIN_RANK_CUSTOM = 16 MAX_RANK_CUSTOM = 128 SGMV_BLOCK_SIZE = 16 BGMV_MAX_RANK = 64 def has_sgmv() -> bool: return HAS_SGMV def pad_rank(t: torch.Tensor, dim: int, world_size: int) -> torch.Tensor: """Pad a tensor to the minimum rank for SGMV and the nearest multiple of the SGMV block size.""" if not has_sgmv(): return t # tensor parallelism will result in effective rank being divided by world_size, # so we need to scale the min rank to offset that effect min_rank = MIN_SGMV_RANK * world_size # if we're at or below the min rank, pad up to the min rank # otherwise, pad to the nearest multiple of the block size current_rank = t.size(dim) target_rank = ( min_rank if current_rank <= min_rank else (current_rank + SGMV_BLOCK_SIZE - 1) // SGMV_BLOCK_SIZE * SGMV_BLOCK_SIZE ) if current_rank == target_rank: return t pad_size = target_rank - current_rank # see complicatd pad syntax here: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html pad = [0, 0] * t.dim() pad[(t.dim() - dim - 1) * 2 + 1] = pad_size pad = tuple(pad) return F.pad(t, pad, mode="constant", value=0.0) def use_cutlass_shrink(lora_rank: int) -> bool: return lora_rank < MIN_RANK_CUSTOM def orient_for_rank(t: torch.Tensor, rank: int) -> torch.Tensor: if MIN_RANK_CUSTOM <= rank <= MAX_RANK_CUSTOM: return t.transpose(0, 1) return t # Source: https://github.com/punica-ai/punica/blob/master/src/punica/ops/__init__.py def add_lora_sgmv_cutlass( y: torch.Tensor, x: torch.Tensor, wa_ptr: torch.Tensor, wb_ptr: torch.Tensor, s_start: torch.Tensor, s_end: torch.Tensor, layer_idx: int, lora_rank: int, ): """ Semantics: y[s[i]:s[i+1]] += x[s[i]:s[i+1]] @ deref(wa_ptr[i]).T @ deref(wb_ptr[i]) Args: y: Shape: `[B, H2]`. Output vectors. Will be changed in-place. x: Shape: `[B, H1]`. Input vectors. wa_ptr: Shape: `[S]`. DType: torch.int64. Pointer to the weight matrices.\ Weight matrix shape: `[num_layers, R, H1]`. wb_ptr: Shape: `[S]`. DType: torch.int64. Pointer to the weight matrices.\ Weight matrix shape: `[num_layers, R, H2]`. s_start: Shape: `[S]`, DType: torch.int32. Indptr of the weight matrices start indices. s_end: Shape: `[S]`, DType: torch.int32. Indptr of the weight matrices end indices. layer_idx: Layer index of the weight matrices. 
""" if lora_rank < MIN_RANK_CUSTOM or lora_rank > MAX_RANK_CUSTOM: # Custom SGMV shrink only supports rank 16, 32, 64, 128 _add_lora_sgmv_cutlass_legacy( y, x, wa_ptr, wb_ptr, s_start, s_end, layer_idx, lora_rank ) return tmp1 = torch.empty((8 * 1024 * 1024,), dtype=torch.uint8, device=x.device) tmp2_size = _kernels.sgmv_cutlass_tmp_size(wa_ptr.size(0)) tmp2 = torch.empty((tmp2_size,), dtype=torch.uint8, device=x.device) v = torch.zeros((x.size(0), lora_rank), dtype=x.dtype, device=x.device) _kernels.sgmv_shrink(v, x, wa_ptr, s_start, s_end, tmp1, layer_idx) _kernels.sgmv_cutlass(y, v, wb_ptr, s_start, s_end, tmp2, layer_idx) def _add_lora_sgmv_cutlass_legacy( y: torch.Tensor, x: torch.Tensor, wa_ptr: torch.Tensor, wb_ptr: torch.Tensor, s_start: torch.IntTensor, s_end: torch.IntTensor, layer_idx: int, lora_rank: int, ): tmp_size = _kernels.sgmv_cutlass_tmp_size(wa_ptr.size(0)) tmp = torch.empty((tmp_size,), dtype=torch.uint8, device=x.device) v = torch.zeros((x.size(0), lora_rank), dtype=x.dtype, device=x.device) _kernels.sgmv_cutlass(v, x, wa_ptr, s_start, s_end, tmp, layer_idx) _kernels.sgmv_cutlass(y, v, wb_ptr, s_start, s_end, tmp, layer_idx) @lru_cache(maxsize=1) def get_tmp_tensor(device: torch.device) -> torch.Tensor: return torch.empty((8 * 1024 * 1024,), dtype=torch.uint8, device=device) @lru_cache(maxsize=32) def get_tmp_tensor_for_size(size: int, device: torch.device) -> torch.Tensor: tmp_size = _kernels.sgmv_cutlass_tmp_size(size) return torch.empty((tmp_size,), dtype=torch.uint8, device=device) def get_tmp_tensor_for_size_no_kernels(size: int, device: torch.device) -> torch.Tensor: return torch.empty((size,), dtype=torch.uint8, device=device) def get_tmp_expand_size(size: int) -> int: return _kernels.sgmv_cutlass_tmp_size(size) def get_tmp_tensors( nsegments: int, lora_rank: int, device: torch.device ) -> Tuple[torch.Tensor, torch.Tensor]: use_cutlass = use_cutlass_shrink(lora_rank) and has_sgmv() has_sgmv_available = has_sgmv() if use_cutlass: tmp = get_tmp_tensor_for_size(nsegments, device) return tmp, tmp elif has_sgmv_available: return get_tmp_tensor(device), get_tmp_tensor_for_size(nsegments, device) else: tmp = get_tmp_tensor_for_size(nsegments, device) return tmp, tmp def lora_a_sgmv_cutlass( x: torch.Tensor, tmp: torch.Tensor, wa_ptr: torch.Tensor, s_start: torch.IntTensor, s_end: torch.IntTensor, layer_idx: int, lora_rank: int, ) -> torch.Tensor: v = torch.zeros((x.size(0), lora_rank), dtype=x.dtype, device=x.device) if MIN_RANK_CUSTOM <= lora_rank <= MAX_RANK_CUSTOM: _kernels.sgmv_shrink(v, x, wa_ptr, s_start, s_end, tmp, layer_idx) else: _kernels.sgmv_cutlass(v, x, wa_ptr, s_start, s_end, tmp, layer_idx) return v def lora_b_sgmv_cutlass( y: torch.Tensor, v: torch.Tensor, tmp: torch.Tensor, wb_ptr: torch.Tensor, s_start: torch.IntTensor, s_end: torch.IntTensor, layer_idx: int, ): _kernels.sgmv_cutlass(y, v, wb_ptr, s_start, s_end, tmp, layer_idx) """ Semantics: y[i] += ( x[i].unsqueeze(0) @ wa_T_all[indices[i], layer_idx, :, :].transpose(-1, -2) @ wb_T_all[indices[i], layer_idx, :, :].transpose(-1, -2) * scale ).squeeze(0) Args: y: Shape: `[B, H2]`. Output vectors. Will be changed in-place. v: Shape: `[B, R]`. Temporary vector. x: Shape: `[B, H1]`. Input vectors. wa_T_all: Shape: `[None, L, R, H1]`. All of the transposed LoRA A matrices. wb_T_all: Shape: `[None, L, H2, R]`. All of the transposed LoRA B matrices. indicies: Shape: `[B]`. Indices of the LoRA weights. layer_idx: Layer index of LoRA weights. scale: Scaling factor. 
""" def add_lora_a_bgmv( v: torch.Tensor, x: torch.Tensor, wa_T_all: torch.Tensor, indicies: torch.LongTensor, layer_idx: int, ): _kernels.dispatch_bgmv(v, x, wa_T_all, indicies, layer_idx, 1.0) def add_lora_b_bgmv( y: torch.Tensor, v: torch.Tensor, wb_T_all: torch.Tensor, indicies: torch.LongTensor, layer_idx: int, ): _kernels.dispatch_bgmv(y, v, wb_T_all, indicies, layer_idx, 1.0) def segmented_matmul( y: torch.Tensor, x: torch.Tensor, w: List[torch.Tensor], b: List[torch.Tensor], s_start: torch.IntTensor, s_end: torch.IntTensor, ): for i in range(len(w)): if s_end[i] - s_start[i] <= 0: continue xi = x[s_start[i] : s_end[i]] wi = w[i] bi = b[i] y[s_start[i] : s_end[i]] = F.linear(xi, wi, bi)
text-generation-inference/server/text_generation_server/utils/sgmv.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/sgmv.py", "repo_id": "text-generation-inference", "token_count": 3651 }
extern crate tokenizers as tk; use crate::encoding::*; use crate::tokenizer::Tokenizer; use napi::bindgen_prelude::*; use tk::tokenizer::{EncodeInput, Encoding}; pub struct EncodeTask<'s> { pub tokenizer: Tokenizer, pub input: Option<EncodeInput<'s>>, pub add_special_tokens: bool, } impl Task for EncodeTask<'static> { type Output = Encoding; type JsValue = JsEncoding; fn compute(&mut self) -> Result<Self::Output> { self .tokenizer .tokenizer .read() .unwrap() .encode_char_offsets( self .input .take() .ok_or(Error::from_reason("No provided input"))?, self.add_special_tokens, ) .map_err(|e| Error::from_reason(format!("{}", e))) } fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> { Ok(JsEncoding { encoding: Some(output), }) } } pub struct DecodeTask { pub tokenizer: Tokenizer, pub ids: Vec<u32>, pub skip_special_tokens: bool, } impl Task for DecodeTask { type Output = String; type JsValue = String; fn compute(&mut self) -> Result<Self::Output> { self .tokenizer .tokenizer .read() .unwrap() .decode(&self.ids, self.skip_special_tokens) .map_err(|e| Error::from_reason(format!("{}", e))) } fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> { Ok(output) } } pub struct EncodeBatchTask<'s> { pub tokenizer: Tokenizer, pub inputs: Option<Vec<EncodeInput<'s>>>, pub add_special_tokens: bool, } impl Task for EncodeBatchTask<'static> { type Output = Vec<Encoding>; type JsValue = Vec<JsEncoding>; fn compute(&mut self) -> Result<Self::Output> { self .tokenizer .tokenizer .read() .unwrap() .encode_batch_char_offsets( self .inputs .take() .ok_or(Error::from_reason("No provided input"))?, self.add_special_tokens, ) .map_err(|e| Error::from_reason(format!("{}", e))) } fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> { Ok( output .into_iter() .map(|encoding| JsEncoding { encoding: Some(encoding), }) .collect(), ) } } pub struct DecodeBatchTask { pub tokenizer: Tokenizer, pub ids: Vec<Vec<u32>>, pub skip_special_tokens: bool, } impl Task for DecodeBatchTask { type Output = Vec<String>; type JsValue = Vec<String>; fn compute(&mut self) -> Result<Self::Output> { let ids: Vec<_> = self.ids.iter().map(|s| s.as_slice()).collect(); self .tokenizer .tokenizer .read() .unwrap() .decode_batch(&ids, self.skip_special_tokens) .map_err(|e| Error::from_reason(format!("{}", e))) } fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> { Ok(output) } }
tokenizers/bindings/node/src/tasks/tokenizer.rs/0
{ "file_path": "tokenizers/bindings/node/src/tasks/tokenizer.rs", "repo_id": "tokenizers", "token_count": 1295 }
from typing import List import jieba from tokenizers import NormalizedString, PreTokenizedString, Regex, Tokenizer from tokenizers.decoders import Decoder from tokenizers.models import BPE from tokenizers.normalizers import Normalizer from tokenizers.pre_tokenizers import PreTokenizer class JiebaPreTokenizer: def jieba_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]: splits = [] # we need to call `str(normalized_string)` because jieba expects a str, # not a NormalizedString for token, start, stop in jieba.tokenize(str(normalized_string)): splits.append(normalized_string[start:stop]) return splits # We can also easily do it in one line: # return [normalized_string[w[1] : w[2]] for w in jieba.tokenize(str(normalized_string))] def odd_number_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]: # Just an odd example... splits = [] last = 0 for i, char in enumerate(str(normalized_string)): if char.isnumeric() and int(char) % 2 == 1: splits.append(normalized_string[last:i]) last = i # Don't forget the last one splits.append(normalized_string[last:]) return splits def pre_tokenize(self, pretok: PreTokenizedString): # Let's call split on the PreTokenizedString to split using `self.jieba_split` pretok.split(self.jieba_split) # Here we can call `pretok.split` multiple times if we want to apply # different algorithm, but we generally just need to call it once. pretok.split(self.odd_number_split) class CustomDecoder: def decode(self, tokens: List[str]) -> str: return "".join(tokens) class CustomNormalizer: def normalize(self, normalized: NormalizedString): # Most of these can be replaced by a `Sequence` combining some provided Normalizer, # (ie Sequence([ NFKC(), Replace(Regex("\s+"), " "), Lowercase() ]) # and it should be the preferred way. That being said, here is an example of the kind # of things that can be done here: normalized.nfkc() normalized.filter(lambda char: not char.isnumeric()) normalized.replace(Regex("\s+"), " ") normalized.lowercase() # This section shows how to attach these custom components to the Tokenizer tok = Tokenizer(BPE()) tok.normalizer = Normalizer.custom(CustomNormalizer()) tok.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer()) tok.decoder = Decoder.custom(CustomDecoder()) input = "氞和服装饰品有限公叞" print("PreTokenize:", input) print(tok.pre_tokenizer.pre_tokenize_str(input)) # [('æ°žå’Œ', (0, 2)), ('服装', (2, 4)), ('饰品', (4, 6)), ('有限公叞', (6, 10))] input = "112233" print("PreTokenize:", input) print(tok.pre_tokenizer.pre_tokenize_str(input)) # [('1', (0, 1)), ('122', (1, 4)), ('3', (4, 5)), ('3', (5, 6))] input = "1234 ℌ𝔢𝔩𝔩𝔬 𝔱𝔥𝔢𝔯𝔢 𝓂𝓎 𝒹ℯ𝒶𝓇 𝕕𝕖𝕒𝕣 𝕗𝕣𝕚𝕖𝕟𝕕!" print("Normalize:", input) print(tok.normalizer.normalize_str(input)) # " hello there my dear dear friend!"
tokenizers/bindings/python/examples/custom_components.py/0
{ "file_path": "tokenizers/bindings/python/examples/custom_components.py", "repo_id": "tokenizers", "token_count": 1292 }
import json import os from typing import Iterator, List, Optional, Union, Tuple from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.models import Unigram from .base_tokenizer import BaseTokenizer class SentencePieceUnigramTokenizer(BaseTokenizer): """SentencePiece Unigram Tokenizer Represents the Unigram algorithm, with the pretokenization used by SentencePiece """ def __init__( self, vocab: Optional[List[Tuple[str, float]]] = None, replacement: str = "▁", add_prefix_space: bool = True, ): if vocab is not None: # Let Unigram(..) fail if only one of them is None tokenizer = Tokenizer(Unigram(vocab)) else: tokenizer = Tokenizer(Unigram()) tokenizer.normalizer = normalizers.Sequence( [normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " ")] ) prepend_scheme = "always" if add_prefix_space else "never" tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) parameters = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(tokenizer, parameters) def train( self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True, special_tokens: Optional[List[Union[str, AddedToken]]] = None, initial_alphabet: Optional[List[str]] = None, unk_token: Optional[str] = None, ): """ Train the model using the given files Args: files (:obj:`List[str]`): A list of path to the files that we should use for training vocab_size (:obj:`int`): The size of the final vocabulary, including all tokens and alphabet. show_progress (:obj:`bool`): Whether to show progress bars while training. special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): A list of special tokens the model should know of. initial_alphabet (:obj:`List[str]`, `optional`): A list of characters to include in the initial alphabet, even if not seen in the training dataset. If the strings contain more than one character, only the first one is kept. unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. """ if special_tokens is None: special_tokens = [] if initial_alphabet is None: initial_alphabet = [] trainer = trainers.UnigramTrainer( vocab_size=vocab_size, special_tokens=special_tokens, show_progress=show_progress, initial_alphabet=initial_alphabet, unk_token=unk_token, ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) def train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True, special_tokens: Optional[List[Union[str, AddedToken]]] = None, initial_alphabet: Optional[List[str]] = None, unk_token: Optional[str] = None, length: Optional[int] = None, ): """ Train the model using the given iterator Args: iterator (:obj:`Union[Iterator[str], Iterator[Iterator[str]]]`): Any iterator over strings or list of strings vocab_size (:obj:`int`): The size of the final vocabulary, including all tokens and alphabet. show_progress (:obj:`bool`): Whether to show progress bars while training. special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`): A list of special tokens the model should know of. initial_alphabet (:obj:`List[str]`, `optional`): A list of characters to include in the initial alphabet, even if not seen in the training dataset. 
If the strings contain more than one character, only the first one is kept. unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. length (:obj:`int`, `optional`): The total number of sequences in the iterator. This is used to provide meaningful progress tracking """ if special_tokens is None: special_tokens = [] if initial_alphabet is None: initial_alphabet = [] trainer = trainers.UnigramTrainer( vocab_size=vocab_size, special_tokens=special_tokens, show_progress=show_progress, initial_alphabet=initial_alphabet, unk_token=unk_token, ) self._tokenizer.train_from_iterator( iterator, trainer=trainer, length=length, ) @staticmethod def from_spm(filename: str): try: import sys sys.path.append(".") import sentencepiece_model_pb2 as model except Exception: raise Exception( "You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required." ) m = model.ModelProto() m.ParseFromString(open(filename, "rb").read()) precompiled_charsmap = m.normalizer_spec.precompiled_charsmap vocab = [(piece.piece, piece.score) for piece in m.pieces] unk_id = m.trainer_spec.unk_id model_type = m.trainer_spec.model_type byte_fallback = m.trainer_spec.byte_fallback if model_type != 1: raise Exception( "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" ) replacement = "▁" add_prefix_space = True tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback)) if precompiled_charsmap: tokenizer.normalizer = normalizers.Sequence( [ normalizers.Precompiled(precompiled_charsmap), normalizers.Replace(Regex(" {2,}"), " "), ] ) else: tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(" {2,}"), " ")]) prepend_scheme = "always" if add_prefix_space else "never" tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) parameters = { "model": "SentencePieceUnigram", } obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters) BaseTokenizer.__init__(obj, tokenizer, parameters) return obj
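
# --- Usage sketch: training the wrapper above from an in-memory iterator. ---
# The corpus, vocabulary size and special tokens are made-up values for
# illustration, so the sketch is left commented out.
#
# tokenizer = SentencePieceUnigramTokenizer()
# tokenizer.train_from_iterator(
#     ["the first example sentence", "the second example sentence"],
#     vocab_size=100,
#     special_tokens=["<unk>"],
#     unk_token="<unk>",
# )
# print(tokenizer.encode("the first example").tokens)
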
tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py", "repo_id": "tokenizers", "token_count": 3405 }
import transformers from tokenizers.implementations import SentencePieceUnigramTokenizer, BaseTokenizer from tokenizers.processors import TemplateProcessing from tokenizers.models import Unigram, BPE from tokenizers import decoders from tokenizers import Tokenizer, Regex from tokenizers.normalizers import ( StripAccents, NFKD, Lowercase, Sequence, BertNormalizer, Precompiled, Replace, ) from tokenizers.pre_tokenizers import ( Digits, WhitespaceSplit, Metaspace, Sequence as PSequence, ) import json import unicodedata import sys import os import datetime import argparse sys.path.append(".") from spm_parity_check import check_details from sentencepiece_extractor import SentencePieceExtractor def check_number_comma(piece: str) -> bool: return len(piece) < 2 or piece[-1] != "," or not piece[-2].isdigit() def get_proto(filename: str): try: import sys sys.path.append(".") import sentencepiece_model_pb2 as model except Exception: raise Exception( "You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required." ) m = model.ModelProto() m.ParseFromString(open(filename, "rb").read()) return m class Converter: def __init__(self, original_tokenizer): self.original_tokenizer = original_tokenizer def converted(self) -> Tokenizer: raise NotImplementedError() class SpmConverter(Converter): def __init__(self, *args): super().__init__(*args) self.proto = get_proto(self.original_tokenizer.vocab_file) def vocab(self, proto): return [(piece.piece, piece.score) for piece in proto.pieces] def unk_id(self, proto): return proto.trainer_spec.unk_id def tokenizer(self, proto): model_type = proto.trainer_spec.model_type vocab = self.vocab(proto) unk_id = self.unk_id(proto) if model_type == 1: tokenizer = Tokenizer(Unigram(vocab, unk_id)) elif model_type == 2: vocab, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract() tokenizer = Tokenizer(BPE(vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True)) else: raise Exception( "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" ) return tokenizer def normalizer(self, proto): precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap return Sequence([Precompiled(precompiled_charsmap), Replace(Regex(" {2,}"), " ")]) def post_processor(self, tokenizer): return None def converted(self): tokenizer = self.tokenizer(self.proto) # Tokenizer assemble tokenizer.normalizer = self.normalizer(self.proto) replacement = "▁" prepend_scheme = "always" tokenizer.pre_tokenizer = Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) post_processor = self.post_processor(tokenizer) if post_processor: tokenizer.post_processor = post_processor # TODO what parameters should we give ? 
parameters = {} return BaseTokenizer(tokenizer, parameters) class AlbertConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): normalizers = [Replace("``", '"'), Replace("''", '"')] if not self.original_tokenizer.keep_accents: normalizers.append(NFKD()) normalizers.append(StripAccents()) if self.original_tokenizer.do_lower_case: normalizers.append(Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap normalizers.append(Precompiled(precompiled_charsmap)) normalizers.append(Replace(Regex(" {2,}"), " ")) return Sequence(normalizers) def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["[CLS]", "$0", "[SEP]"], seq_b=["$1", "[SEP]"], special_tokens=[ ("[CLS]", tokenizer.get_vocab()["[CLS]"]), ("[SEP]", tokenizer.get_vocab()["[SEP]"]), ], ) class CamembertConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>NOTUSED", 0.0), ("<pad>", 0.0), ("</s>NOTUSED", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces] return vocab def unk_id(self, proto): # See vocab unk position return 3 def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["<s>", "$0", "</s>"], seq_b=["$1", "</s>"], special_tokens=[ ("<s>", tokenizer.get_vocab()["<s>"]), ("</s>", tokenizer.get_vocab()["</s>"]), ], ) class MBartConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] vocab += [ ("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ] return vocab def unk_id(self, proto): return 3 def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["$0", "</s>", "en_XX"], seq_b=["$1", "</s>"], special_tokens=[ ("en_XX", tokenizer.get_vocab()["en_XX"]), ("</s>", tokenizer.get_vocab()["</s>"]), ], ) class XLMRobertaConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] return vocab def unk_id(self, proto): unk_id = 3 return unk_id def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["<s>", "$0", "</s>"], seq_b=["$1", "</s>"], special_tokens=[ ("<s>", tokenizer.get_vocab()["<s>"]), ("</s>", tokenizer.get_vocab()["</s>"]), ], ) class XLNetConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): normalizers = [Replace("``", '"'), Replace("''", '"')] if not self.original_tokenizer.keep_accents: normalizers.append(NFKD()) normalizers.append(StripAccents()) if self.original_tokenizer.do_lower_case: normalizers.append(Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap normalizers.append(Precompiled(precompiled_charsmap)) normalizers.append(Replace(Regex(" {2,}"), " ")) return Sequence(normalizers) def post_processor(self, tokenizer): return TemplateProcessing( 
seq_a=["$0", "<sep>", "<cls>"], seq_b=["$1", "<sep>"], special_tokens=[ ("<sep>", tokenizer.get_vocab()["<sep>"]), ("<cls>", tokenizer.get_vocab()["<cls>"]), ], ) class ReformerConverter(SpmConverter): pass class PegasusConverter(SpmConverter): offset = 103 def vocab(self, proto): vocab = [ (self.original_tokenizer.pad_token, 0), (self.original_tokenizer.eos_token, 0), ] vocab += [(f"unk_{i}", -100) for i in range(2, 2 + self.offset)] vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]] return vocab def unk_id(self, proto): return proto.trainer_spec.unk_id + self.offset def post_processor(self, tokenizer): eos = self.original_tokenizer.eos_token return TemplateProcessing( seq_a=["$0", eos], seq_b=["$1", eos], special_tokens=[(eos, tokenizer.get_vocab()[eos])], ) class T5Converter(SpmConverter): def post_processor(self, tokenizer): return TemplateProcessing( seq_a=["$0", "</s>"], seq_b=["$1", "</s>"], special_tokens=[("</s>", tokenizer.get_vocab()["</s>"])], ) CONVERTERS = { "AlbertTokenizer": AlbertConverter, "CamembertTokenizer": CamembertConverter, "XLMRobertaTokenizer": XLMRobertaConverter, "MBartTokenizer": MBartConverter, "XLNetTokenizer": XLNetConverter, "ReformerTokenizer": ReformerConverter, "PegasusTokenizer": PegasusConverter, "T5Tokenizer": T5Converter, } def check(pretrained, filename): transformer_tokenizer = transformers.AutoTokenizer.from_pretrained(pretrained) converter_class = CONVERTERS[transformer_tokenizer.__class__.__name__] tokenizer = converter_class(transformer_tokenizer).converted() now = datetime.datetime.now trans_total_time = datetime.timedelta(seconds=0) tok_total_time = datetime.timedelta(seconds=0) with open(filename, "r") as f: for i, line in enumerate(f): line = line.strip() start = now() ids = transformer_tokenizer.encode(line) trans = now() tok_ids = tokenizer.encode(line).ids tok = now() trans_total_time += trans - start tok_total_time += tok - trans if ids != tok_ids: if check_details(line, ids, tok_ids, transformer_tokenizer, tokenizer): continue assert ids == tok_ids, f"Error in line {i}: {line} {ids} != {tok_ids}" tokenizer.save(f"{pretrained.replace('/', '-')}.json") return ("OK", trans_total_time / tok_total_time) def main(): pretraineds = [ "albert-base-v1", "albert-large-v1", "albert-xlarge-v1", "albert-xxlarge-v1", "albert-base-v2", "albert-large-v2", "albert-xlarge-v2", "albert-xxlarge-v2", "camembert-base", "xlm-roberta-base", "xlm-roberta-large", "xlm-roberta-large-finetuned-conll02-dutch", "xlm-roberta-large-finetuned-conll02-spanish", "xlm-roberta-large-finetuned-conll03-english", "xlm-roberta-large-finetuned-conll03-german", "facebook/mbart-large-en-ro", "facebook/mbart-large-cc25", "xlnet-base-cased", "xlnet-large-cased", "google/reformer-crime-and-punishment", "t5-small", "google/pegasus-large", ] parser = argparse.ArgumentParser() parser.add_argument( "--filename", required=True, type=str, help="The filename that we are going to encode in both versions to check that conversion worked", ) parser.add_argument( "--models", type=lambda s: s.split(","), default=pretraineds, help=f"The pretrained tokenizers you want to test agains, (default: {pretraineds})", ) args = parser.parse_args() print(args.filename) model_len = 50 status_len = 6 speedup_len = 8 print(f"|{'Model':^{model_len}}|{'Status':^{status_len}}|{'Speedup':^{speedup_len}}|") print(f"|{'-'*model_len}|{'-'*status_len}|{'-'*speedup_len}|") for pretrained in args.models: status, speedup = check(pretrained, args.filename) 
print(f"|{pretrained:<{model_len}}|{status:^{status_len}}|{speedup:^{speedup_len - 1}.2f}x|") if __name__ == "__main__": main()
tokenizers/bindings/python/scripts/convert.py/0
{ "file_path": "tokenizers/bindings/python/scripts/convert.py", "repo_id": "tokenizers", "token_count": 6304 }
use std::marker::PhantomData; use std::sync::{Arc, Mutex}; mod iterators; mod normalization; mod pretokenization; mod regex; pub mod serde_pyo3; pub use iterators::*; pub use normalization::*; pub use pretokenization::*; pub use regex::*; // RefMut utils pub trait DestroyPtr { fn destroy(&mut self); } pub struct RefMutGuard<'r, T: DestroyPtr> { content: T, r: PhantomData<&'r mut T>, } impl<T: DestroyPtr> RefMutGuard<'_, T> { pub fn new(content: T) -> Self { Self { content, r: PhantomData, } } pub fn get(&self) -> &T { &self.content } } impl<T: DestroyPtr> Drop for RefMutGuard<'_, T> { fn drop(&mut self) { self.content.destroy() } } #[derive(Clone)] pub struct RefMutContainer<T> { inner: Arc<Mutex<Option<*mut T>>>, } impl<T> RefMutContainer<T> { pub fn new(content: &mut T) -> Self { Self { inner: Arc::new(Mutex::new(Some(content))), } } pub fn map<F: FnOnce(&T) -> U, U>(&self, f: F) -> Option<U> { let lock = self.inner.lock().unwrap(); let ptr = lock.as_ref()?; Some(f(unsafe { ptr.as_ref().unwrap() })) } pub fn map_mut<F: FnOnce(&mut T) -> U, U>(&mut self, f: F) -> Option<U> { let lock = self.inner.lock().unwrap(); let ptr = lock.as_ref()?; Some(f(unsafe { ptr.as_mut().unwrap() })) } } impl<T> DestroyPtr for RefMutContainer<T> { fn destroy(&mut self) { self.inner.lock().unwrap().take(); } } unsafe impl<T: Send> Send for RefMutContainer<T> {} unsafe impl<T: Sync> Sync for RefMutContainer<T> {}
tokenizers/bindings/python/src/utils/mod.rs/0
{ "file_path": "tokenizers/bindings/python/src/utils/mod.rs", "repo_id": "tokenizers", "token_count": 752 }
import copy import os import pickle import pytest from tokenizers import ( AddedToken, SentencePieceUnigramTokenizer, Tokenizer, models, normalizers, pre_tokenizers, trainers, ) from ..utils import data_dir, train_files class TestBpeTrainer: def test_can_modify(self): trainer = trainers.BpeTrainer( vocab_size=12345, min_frequency=12, show_progress=False, special_tokens=["1", "2"], limit_alphabet=13, initial_alphabet=["a", "b", "c"], continuing_subword_prefix="pref", end_of_word_suffix="suf", ) assert trainer.vocab_size == 12345 assert trainer.min_frequency == 12 assert trainer.show_progress == False assert trainer.special_tokens == [ AddedToken("1", special=True), AddedToken("2", special=True), ] assert trainer.limit_alphabet == 13 assert sorted(trainer.initial_alphabet) == ["a", "b", "c"] assert trainer.continuing_subword_prefix == "pref" assert trainer.end_of_word_suffix == "suf" # Modify these trainer.vocab_size = 20000 assert trainer.vocab_size == 20000 trainer.min_frequency = 1 assert trainer.min_frequency == 1 trainer.show_progress = True assert trainer.show_progress == True trainer.special_tokens = [] assert trainer.special_tokens == [] trainer.limit_alphabet = None assert trainer.limit_alphabet == None trainer.initial_alphabet = ["d", "z"] assert sorted(trainer.initial_alphabet) == ["d", "z"] trainer.continuing_subword_prefix = None assert trainer.continuing_subword_prefix == None trainer.end_of_word_suffix = None assert trainer.continuing_subword_prefix == None def test_can_pickle(self): assert ( trainers.BpeTrainer(min_frequency=12).__getstate__() == b"""{"BpeTrainer":{"min_frequency":12,"vocab_size":30000,"show_progress":true,"special_tokens":[],"limit_alphabet":null,"initial_alphabet":[],"continuing_subword_prefix":null,"end_of_word_suffix":null,"max_token_length":null,"words":{}}}""" ) assert isinstance(pickle.loads(pickle.dumps(trainers.BpeTrainer(min_frequency=12))), trainers.BpeTrainer) assert isinstance(copy.deepcopy(trainers.BpeTrainer(min_frequency=12)), trainers.BpeTrainer) # Make sure everything is correct assert pickle.dumps(pickle.loads(pickle.dumps(trainers.BpeTrainer(min_frequency=12)))) == pickle.dumps( trainers.BpeTrainer(min_frequency=12) ) class TestWordPieceTrainer: def test_can_modify(self): trainer = trainers.WordPieceTrainer( vocab_size=12345, min_frequency=12, show_progress=False, special_tokens=["1", "2"], limit_alphabet=13, initial_alphabet=["a", "b", "c"], continuing_subword_prefix="pref", end_of_word_suffix="suf", ) assert trainer.vocab_size == 12345 assert trainer.min_frequency == 12 assert trainer.show_progress == False assert trainer.special_tokens == [ AddedToken("1", special=True), AddedToken("2", special=True), ] assert trainer.limit_alphabet == 13 assert sorted(trainer.initial_alphabet) == ["a", "b", "c"] assert trainer.continuing_subword_prefix == "pref" assert trainer.end_of_word_suffix == "suf" # Modify these trainer.vocab_size = 20000 assert trainer.vocab_size == 20000 trainer.min_frequency = 1 assert trainer.min_frequency == 1 trainer.show_progress = True assert trainer.show_progress == True trainer.special_tokens = [] assert trainer.special_tokens == [] trainer.limit_alphabet = None assert trainer.limit_alphabet == None trainer.initial_alphabet = ["d", "z"] assert sorted(trainer.initial_alphabet) == ["d", "z"] trainer.continuing_subword_prefix = None assert trainer.continuing_subword_prefix == None trainer.end_of_word_suffix = None assert trainer.continuing_subword_prefix == None def test_can_pickle(self): assert 
isinstance(pickle.loads(pickle.dumps(trainers.WordPieceTrainer())), trainers.WordPieceTrainer) class TestWordLevelTrainer: def test_can_modify(self): trainer = trainers.WordLevelTrainer( vocab_size=12345, min_frequency=12, show_progress=False, special_tokens=["1", "2"] ) assert trainer.vocab_size == 12345 assert trainer.min_frequency == 12 assert trainer.show_progress == False assert trainer.special_tokens == [ AddedToken("1", special=True), AddedToken("2", special=True), ] # Modify these trainer.vocab_size = 20000 assert trainer.vocab_size == 20000 trainer.min_frequency = 1 assert trainer.min_frequency == 1 trainer.show_progress = True assert trainer.show_progress == True trainer.special_tokens = [] assert trainer.special_tokens == [] def test_can_pickle(self): assert isinstance(pickle.loads(pickle.dumps(trainers.WordLevelTrainer())), trainers.WordLevelTrainer) class TestUnigram: def test_train(self, train_files): tokenizer = SentencePieceUnigramTokenizer() tokenizer.train(train_files["small"], show_progress=False) filename = "tests/data/unigram_trained.json" tokenizer.save(filename) os.remove(filename) def test_train_parallelism_with_custom_pretokenizer(self, train_files): class GoodCustomPretok: def split(self, n, normalized): # Here we just test that we can return a List[NormalizedString], it # does not really make sense to return twice the same otherwise return [normalized, normalized] def pre_tokenize(self, pretok): pretok.split(self.split) custom = pre_tokenizers.PreTokenizer.custom(GoodCustomPretok()) bpe_tokenizer = Tokenizer(models.BPE()) bpe_tokenizer.normalizer = normalizers.Lowercase() bpe_tokenizer.pre_tokenizer = custom if "TOKENIZERS_PARALLELISM" in os.environ: del os.environ["TOKENIZERS_PARALLELISM"] trainer = trainers.BpeTrainer(special_tokens=["<unk>"], show_progress=False) bpe_tokenizer.train([train_files["small"]], trainer=trainer) def test_can_pickle(self): assert isinstance(pickle.loads(pickle.dumps(trainers.UnigramTrainer())), trainers.UnigramTrainer) def test_train_with_special_tokens(self): filename = "tests/data/dummy-unigram-special_tokens-train.txt" with open(filename, "w") as f: f.write( """ [CLS] The Zen of Python, by Tim Peters [SEP] [CLS] Beautiful is better than ugly. [SEP] [CLS] Explicit is better than implicit. [SEP] [CLS] Simple is better than complex. [SEP] [CLS] Complex is better than complicated. [SEP] [CLS] Flat is better than nested. [SEP] [CLS] Sparse is better than dense. [SEP] [CLS] Readability counts. [SEP] [CLS] Special cases aren't special enough to break the rules. [SEP] [CLS] Although practicality beats purity. [SEP] [CLS] Errors should never pass silently. [SEP] [CLS] Unless explicitly silenced. [SEP] [CLS] In the face of ambiguity, refuse the temptation to guess. [SEP] [CLS] There should be one-- and preferably only one --obvious way to do it. [SEP] [CLS] Although that way may not be obvious at first unless you're Dutch. [SEP] [CLS] Now is better than never. [SEP] [CLS] Although never is often better than *right* now. [SEP] [CLS] If the implementation is hard to explain, it's a bad idea. [SEP] [CLS] If the implementation is easy to explain, it may be a good idea. [SEP] [CLS] Namespaces are one honking great idea -- let's do more of those! 
[SEP] """ ) tokenizer = Tokenizer(models.Unigram()) trainer = trainers.UnigramTrainer( show_progress=False, special_tokens=["[PAD]", "[SEP]", "[CLS]"], unk_token="[UNK]" ) tokenizer.train([filename], trainer=trainer) assert tokenizer.encode("[CLS] This is a test [SEP]").tokens == [ "[CLS]", " T", "h", "i", "s", " is ", "a", " ", "te", "s", "t ", "[SEP]", ] tokenizer = Tokenizer(models.Unigram()) trainer = trainers.UnigramTrainer( show_progress=False, special_tokens=["[PAD]", "[SEP]", "[CLS]"], unk_token="[UNK]", vocab_size=100, ) tokenizer.train([filename], trainer=trainer) assert tokenizer.get_vocab_size() == 100 tokenizer = Tokenizer(models.Unigram()) trainer = trainers.UnigramTrainer( show_progress=False, special_tokens=["[PAD]", "[SEP]", "[CLS]", "[UNK]"], unk_token="[UNK]", vocab_size=100, ) tokenizer.train([filename], trainer=trainer) assert tokenizer.get_vocab_size() == 100 def test_cannot_train_different_model(self): tokenizer = Tokenizer(models.BPE()) trainer = trainers.UnigramTrainer(show_progress=False) with pytest.raises(Exception, match="UnigramTrainer can only train a Unigram"): tokenizer.train([], trainer) def test_can_modify(self): trainer = trainers.UnigramTrainer( vocab_size=12345, show_progress=False, special_tokens=["1", AddedToken("2", lstrip=True)], initial_alphabet=["a", "b", "c"], ) assert trainer.vocab_size == 12345 assert trainer.show_progress == False assert trainer.special_tokens == [ AddedToken("1", normalized=False, special=True), AddedToken("2", lstrip=True, normalized=False, special=True), ] assert sorted(trainer.initial_alphabet) == ["a", "b", "c"] # Modify these trainer.vocab_size = 20000 assert trainer.vocab_size == 20000 trainer.show_progress = True assert trainer.show_progress == True trainer.special_tokens = [] assert trainer.special_tokens == [] trainer.initial_alphabet = ["d", "z"] assert sorted(trainer.initial_alphabet) == ["d", "z"] def test_continuing_prefix_trainer_mismatch(self): UNK = "[UNK]" special_tokens = [UNK] tokenizer = Tokenizer(models.BPE(unk_token=UNK, continuing_subword_prefix="##")) trainer = trainers.BpeTrainer(special_tokens=special_tokens) tokenizer.pre_tokenizer = pre_tokenizers.Sequence( [pre_tokenizers.Whitespace(), pre_tokenizers.Digits(individual_digits=True)] ) tokenizer.train(files=["data/big.txt"], trainer=trainer) tokenizer.save("data/tokenizer.json") tokenizer.from_file("data/tokenizer.json")
tokenizers/bindings/python/tests/bindings/test_trainers.py/0
{ "file_path": "tokenizers/bindings/python/tests/bindings/test_trainers.py", "repo_id": "tokenizers", "token_count": 4958 }
# Added Tokens <tokenizerslangcontent> <python> ## AddedToken [[autodoc]] tokenizers.AddedToken - content - lstrip - normalized - rstrip - single_word </python> <rust> The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website. </rust> <node> The node API has not been documented yet. </node> </tokenizerslangcontent>
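As a small illustrative sketch of how `AddedToken` can be used from Python (the token content and parameter choices here are arbitrary examples, not recommendations):

```python
from tokenizers import AddedToken, Tokenizer
from tokenizers.models import BPE

tokenizer = Tokenizer(BPE())
# Register a token that should never be split, stripping whitespace on its left
tokenizer.add_tokens([AddedToken("<custom>", lstrip=True, single_word=True)])
```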
tokenizers/docs/source-doc-builder/api/added-tokens.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/added-tokens.mdx", "repo_id": "tokenizers", "token_count": 134 }
# Quicktour Let's have a quick look at the 🀗 Tokenizers library features. The library provides an implementation of today's most used tokenizers that is both easy to use and blazing fast. ## Build a tokenizer from scratch To illustrate how fast the 🀗 Tokenizers library is, let's train a new tokenizer on [wikitext-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/) (516M of text) in just a few seconds. First things first, you will need to download this dataset and unzip it with: ``` bash wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip unzip wikitext-103-raw-v1.zip ``` ### Training the tokenizer In this tour, we will build and train a Byte-Pair Encoding (BPE) tokenizer. For more information about the different types of tokenizers, check out this [guide](https://huggingface.co/transformers/tokenizer_summary.html) in the 🀗 Transformers documentation. Here, training the tokenizer means it will learn merge rules by: - Starting with all the characters present in the training corpus as tokens. - Identifying the most common pair of tokens and merging it into one token. - Repeating until the vocabulary (i.e., the number of tokens) has reached the size we want. The main API of the library is the `class` `Tokenizer`; here is how we instantiate one with a BPE model: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START init_tokenizer", "end-before": "END init_tokenizer", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_init_tokenizer", "end-before": "END quicktour_init_tokenizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START init_tokenizer", "end-before": "END init_tokenizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> To train our tokenizer on the wikitext files, we will need to instantiate a [trainer]{.title-ref}, in this case a `BpeTrainer` <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START init_trainer", "end-before": "END init_trainer", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_init_trainer", "end-before": "END quicktour_init_trainer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START init_trainer", "end-before": "END init_trainer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> We can set the training arguments like `vocab_size` or `min_frequency` (here left at their default values of 30,000 and 0), but the most important part is to give the `special_tokens` we plan to use later on (they are not used at all during training) so that they get inserted in the vocabulary. <Tip> The order in which you write the special tokens list matters: here `"[UNK]"` will get the ID 0, `"[CLS]"` will get the ID 1 and so forth. </Tip> We could train our tokenizer right now, but it wouldn't be optimal.
Without a pre-tokenizer that will split our inputs into words, we might get tokens that overlap several words: for instance we could get an `"it is"` token since those two words often appear next to each other. Using a pre-tokenizer will ensure no token is bigger than a word returned by the pre-tokenizer. Here we want to train a subword BPE tokenizer, and we will use the easiest pre-tokenizer possible by splitting on whitespace. <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START init_pretok", "end-before": "END init_pretok", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_init_pretok", "end-before": "END quicktour_init_pretok", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START init_pretok", "end-before": "END init_pretok", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> Now, we can just call the `Tokenizer.train` method with any list of files we want to use: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START train", "end-before": "END train", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_train", "end-before": "END quicktour_train", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START train", "end-before": "END train", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> This should only take a few seconds to train our tokenizer on the full wikitext dataset! 
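Putting these pieces together, a minimal Python version of the steps above looks roughly like the following sketch (the file paths are assumed to match the wikitext download from earlier):

```python
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer

# Model, trainer and pre-tokenizer as described in this section
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
tokenizer.pre_tokenizer = Whitespace()

# Train on the unzipped wikitext files (paths assumed from the download step)
files = [f"wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
tokenizer.train(files, trainer)
```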
To save the tokenizer in one file that contains all its configuration and vocabulary, just use the `Tokenizer.save` method: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START save", "end-before": "END save", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_save", "end-before": "END quicktour_save", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START save", "end-before": "END save", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> and you can reload your tokenizer from that file with the `Tokenizer.from_file` `classmethod`: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START reload_tokenizer", "end-before": "END reload_tokenizer", "dedent": 12} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_reload_tokenizer", "end-before": "END quicktour_reload_tokenizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START reload_tokenizer", "end-before": "END reload_tokenizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> ### Using the tokenizer Now that we have trained a tokenizer, we can use it on any text we want with the `Tokenizer.encode` method: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START encode", "end-before": "END encode", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_encode", "end-before": "END quicktour_encode", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START encode", "end-before": "END encode", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> This applied the full pipeline of the tokenizer on the text, returning an `Encoding` object. To learn more about this pipeline, and how to apply (or customize) parts of it, check out [this page](pipeline). This `Encoding` object then has all the attributes you need for your deep learning model (or other). 
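As a quick inline sketch of the calls described above (the file name and the example sentence are illustrative assumptions):

```python
from tokenizers import Tokenizer

# Save the trained tokenizer, reload it, and encode a sentence
tokenizer.save("tokenizer-wiki.json")
tokenizer = Tokenizer.from_file("tokenizer-wiki.json")
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
```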
The `tokens` attribute contains the segmentation of your text in tokens: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_tokens", "end-before": "END print_tokens", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_tokens", "end-before": "END quicktour_print_tokens", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_tokens", "end-before": "END print_tokens", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> Similarly, the `ids` attribute will contain the index of each of those tokens in the tokenizer's vocabulary: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_ids", "end-before": "END print_ids", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_ids", "end-before": "END quicktour_print_ids", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_ids", "end-before": "END print_ids", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> An important feature of the 🀗 Tokenizers library is that it comes with full alignment tracking, meaning you can always get the part of your original sentence that corresponds to a given token. Those are stored in the `offsets` attribute of our `Encoding` object. 
For instance, let's assume we want to find out what caused the `"[UNK]"` token to appear. It is the token at index 9 in the list, so we can simply ask for the offset at that index: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_offsets", "end-before": "END print_offsets", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_offsets", "end-before": "END quicktour_print_offsets", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_offsets", "end-before": "END print_offsets", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> and those are the indices that correspond to the emoji in the original sentence: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START use_offsets", "end-before": "END use_offsets", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_use_offsets", "end-before": "END quicktour_use_offsets", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START use_offsets", "end-before": "END use_offsets", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> ### Post-processing We might want our tokenizer to automatically add special tokens, like `"[CLS]"` or `"[SEP]"`. To do this, we use a post-processor. `TemplateProcessing` is the most commonly used; you just have to specify a template for the processing of single sentences and pairs of sentences, along with the special tokens and their IDs. When we built our tokenizer, we set `"[CLS]"` and `"[SEP]"` in positions 1 and 2 of our list of special tokens, so this should be their IDs.
To double-check, we can use the `Tokenizer.token_to_id` method: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START check_sep", "end-before": "END check_sep", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_check_sep", "end-before": "END quicktour_check_sep", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START check_sep", "end-before": "END check_sep", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> Here is how we can set the post-processing to give us the traditional BERT inputs: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START init_template_processing", "end-before": "END init_template_processing", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_init_template_processing", "end-before": "END quicktour_init_template_processing", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START init_template_processing", "end-before": "END init_template_processing", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> Let's go over this snippet of code in more detail. First, we specify the template for single sentences: those should have the form `"[CLS] $A [SEP]"` where `$A` represents our sentence. Then, we specify the template for sentence pairs, which should have the form `"[CLS] $A [SEP] $B [SEP]"` where `$A` represents the first sentence and `$B` the second one. The `:1` added in the template represents the `type IDs` we want for each part of our input: it defaults to 0 for everything (which is why we don't have `$A:0`), and here we set it to 1 for the tokens of the second sentence and the last `"[SEP]"` token. Lastly, we specify the special tokens we used and their IDs in our tokenizer's vocabulary.
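Concretely, the post-processor described here corresponds to a sketch like the following (the token IDs 1 and 2 are assumed from the order of the special tokens given at training time):

```python
from tokenizers.processors import TemplateProcessing

# "[CLS]" and "[SEP]" were the second and third special tokens, hence IDs 1 and 2
tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)
```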
To check that this worked properly, let's try to encode the same sentence as before: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_special_tokens", "end-before": "END print_special_tokens", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_special_tokens", "end-before": "END quicktour_print_special_tokens", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_special_tokens", "end-before": "END print_special_tokens", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> To check the results on a pair of sentences, we just pass the two sentences to `Tokenizer.encode`: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_special_tokens_pair", "end-before": "END print_special_tokens_pair", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_special_tokens_pair", "end-before": "END quicktour_print_special_tokens_pair", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_special_tokens_pair", "end-before": "END print_special_tokens_pair", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> You can then check that the type IDs attributed to each token are correct with <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_type_ids", "end-before": "END print_type_ids", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_type_ids", "end-before": "END quicktour_print_type_ids", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_type_ids", "end-before": "END print_type_ids", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> If you save your tokenizer with `Tokenizer.save`, the post-processor will be saved along with it.
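Before moving on, here is a hedged inline sketch of the pair-encoding check described above (the sentences are just examples):

```python
# Encode a pair of sentences and inspect the result
output = tokenizer.encode("Hello, y'all!", "How are you 😁 ?")
print(output.tokens)
# With the template above, type_ids should be 0 for the first sentence (and its
# surrounding special tokens) and 1 for the second sentence and the final "[SEP]"
print(output.type_ids)
```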
### Encoding multiple sentences in a batch To get the full speed of the 🀗 Tokenizers library, it's best to process your texts in batches, using the `Tokenizer.encode_batch` method: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START encode_batch", "end-before": "END encode_batch", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_encode_batch", "end-before": "END quicktour_encode_batch", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START encode_batch", "end-before": "END encode_batch", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> The output is then a list of `Encoding` objects like the ones we saw before. You can process as many texts together as you like, as long as they fit in memory. To process a batch of sentence pairs, pass two lists to the `Tokenizer.encode_batch` method: the list of sentences A and the list of sentences B: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START encode_batch_pair", "end-before": "END encode_batch_pair", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_encode_batch_pair", "end-before": "END quicktour_encode_batch_pair", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START encode_batch_pair", "end-before": "END encode_batch_pair", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> When encoding multiple sentences, you can automatically pad the outputs to the longest sentence present by using `Tokenizer.enable_padding`, with the `pad_token` and its ID (we can double-check the ID of the padding token with `Tokenizer.token_to_id`, as before): <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START enable_padding", "end-before": "END enable_padding", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_enable_padding", "end-before": "END quicktour_enable_padding", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START enable_padding", "end-before": "END enable_padding", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> We can set the `direction` of the padding (defaults to the right) or a given `length` if we want to pad every sample to that specific number (here we leave it unset to pad to the size of the longest text).
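In Python, this might look like the following sketch (the `[PAD]` ID of 3 is an assumption based on the special-token order used when training):

```python
# Pad every sequence in a batch to the length of the longest one
tokenizer.enable_padding(pad_id=3, pad_token="[PAD]")
output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
print(output[1].tokens)
```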
<tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_batch_tokens", "end-before": "END print_batch_tokens", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_batch_tokens", "end-before": "END quicktour_print_batch_tokens", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_batch_tokens", "end-before": "END print_batch_tokens", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> In this case, the `attention mask` generated by the tokenizer takes the padding into account: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_quicktour.py", "language": "python", "start-after": "START print_attention_mask", "end-before": "END print_attention_mask", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START quicktour_print_attention_mask", "end-before": "END quicktour_print_attention_mask", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/quicktour.test.ts", "language": "js", "start-after": "START print_attention_mask", "end-before": "END print_attention_mask", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> ## Pretrained <tokenizerslangcontent> <python> ### Using a pretrained tokenizer You can load any tokenizer from the Hugging Face Hub as long as a `tokenizer.json` file is available in the repository. ```python from tokenizers import Tokenizer tokenizer = Tokenizer.from_pretrained("bert-base-uncased") ``` ### Importing a pretrained tokenizer from legacy vocabulary files You can also import a pretrained tokenizer directly in, as long as you have its vocabulary file. For instance, here is how to import the classic pretrained BERT tokenizer: ```python from tokenizers import BertWordPieceTokenizer tokenizer = BertWordPieceTokenizer("bert-base-uncased-vocab.txt", lowercase=True) ``` as long as you have downloaded the file `bert-base-uncased-vocab.txt` with ```bash wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt ``` </python> </tokenizerslangcontent>
tokenizers/docs/source-doc-builder/quicktour.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/quicktour.mdx", "repo_id": "tokenizers", "token_count": 7936 }
Components ==================================================================================================== When building a Tokenizer, you can attach various types of components to this Tokenizer in order to customize its behavior. This page lists most provided components. .. _normalizers: .. entities:: python BertNormalizer.clean_text clean_text BertNormalizer.handle_chinese_chars handle_chinese_chars BertNormalizer.strip_accents strip_accents BertNormalizer.lowercase lowercase Normalizer.Sequence ``Sequence([NFKC(), Lowercase()])`` PreTokenizer.Sequence ``Sequence([Punctuation(), WhitespaceSplit()])`` SplitDelimiterBehavior.removed :obj:`removed` SplitDelimiterBehavior.isolated :obj:`isolated` SplitDelimiterBehavior.merged_with_previous :obj:`merged_with_previous` SplitDelimiterBehavior.merged_with_next :obj:`merged_with_next` SplitDelimiterBehavior.contiguous :obj:`contiguous` .. entities:: rust BertNormalizer.clean_text clean_text BertNormalizer.handle_chinese_chars handle_chinese_chars BertNormalizer.strip_accents strip_accents BertNormalizer.lowercase lowercase Normalizer.Sequence ``Sequence::new(vec![NFKC, Lowercase])`` PreTokenizer.Sequence ``Sequence::new(vec![Punctuation, WhitespaceSplit])`` SplitDelimiterBehavior.removed :obj:`Removed` SplitDelimiterBehavior.isolated :obj:`Isolated` SplitDelimiterBehavior.merged_with_previous :obj:`MergedWithPrevious` SplitDelimiterBehavior.merged_with_next :obj:`MergedWithNext` SplitDelimiterBehavior.contiguous :obj:`Contiguous` .. entities:: node BertNormalizer.clean_text cleanText BertNormalizer.handle_chinese_chars handleChineseChars BertNormalizer.strip_accents stripAccents BertNormalizer.lowercase lowercase Normalizer.Sequence .. PreTokenizer.Sequence .. SplitDelimiterBehavior.removed :obj:`removed` SplitDelimiterBehavior.isolated :obj:`isolated` SplitDelimiterBehavior.merged_with_previous :obj:`mergedWithPrevious` SplitDelimiterBehavior.merged_with_next :obj:`mergedWithNext` SplitDelimiterBehavior.contiguous :obj:`contiguous` Normalizers ---------------------------------------------------------------------------------------------------- A ``Normalizer`` is in charge of pre-processing the input string in order to normalize it as relevant for a given use case. Some common examples of normalization are the Unicode normalization algorithms (NFD, NFKD, NFC & NFKC), lowercasing etc... The specificity of ``tokenizers`` is that we keep track of the alignment while normalizing. This is essential to allow mapping from the generated tokens back to the input text. The ``Normalizer`` is optional. .. list-table:: :header-rows: 1 * - Name - Description - Example * - NFD - NFD unicode normalization - * - NFKD - NFKD unicode normalization - * - NFC - NFC unicode normalization - * - NFKC - NFKC unicode normalization - * - Lowercase - Replaces all uppercase to lowercase - Input: ``HELLO ᜈΔΥΣΣΕΎΣ`` Output: ``hello ᜀΎυσσεύς`` * - Strip - Removes all whitespace characters on the specified sides (left, right or both) of the input - Input: ``" hi "`` Output: ``"hi"`` * - StripAccents - Removes all accent symbols in unicode (to be used with NFD for consistency) - Input: ``é`` Output: ``e`` * - Replace - Replaces a custom string or regexp and changes it with given content - ``Replace("a", "e")`` will behave like this: Input: ``"banana"`` Output: ``"benene"`` * - BertNormalizer - Provides an implementation of the Normalizer used in the original BERT. 
Options that can be set are: - :entity:`BertNormalizer.clean_text` - :entity:`BertNormalizer.handle_chinese_chars` - :entity:`BertNormalizer.strip_accents` - :entity:`BertNormalizer.lowercase` - * - Sequence - Composes multiple normalizers that will run in the provided order - :entity:`Normalizer.Sequence` .. _pre-tokenizers: Pre tokenizers ---------------------------------------------------------------------------------------------------- The ``PreTokenizer`` takes care of splitting the input according to a set of rules. This pre-processing lets you ensure that the underlying ``Model`` does not build tokens across multiple "splits". For example, if you don't want to have whitespaces inside a token, then you can have a ``PreTokenizer`` that splits on these whitespaces. You can easily combine multiple ``PreTokenizer`` together using a ``Sequence`` (see below). The ``PreTokenizer`` is also allowed to modify the string, just like a ``Normalizer`` does. This is necessary to allow some complicated algorithms that require splitting before normalizing (e.g. the ByteLevel). .. list-table:: :header-rows: 1 * - Name - Description - Example * - ByteLevel - Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique has been introduced by OpenAI with GPT-2 and has some more or less nice properties: - Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters. - A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉) - For non-ASCII characters, it gets completely unreadable, but it works nonetheless! - Input: ``"Hello my friend, how are you?"`` Output: ``"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"`` * - Whitespace - Splits on word boundaries (using the following regular expression: ``\w+|[^\w\s]+``) - Input: ``"Hello there!"`` Output: ``"Hello", "there", "!"`` * - WhitespaceSplit - Splits on any whitespace character - Input: ``"Hello there!"`` Output: ``"Hello", "there!"`` * - Punctuation - Will isolate all punctuation characters - Input: ``"Hello?"`` Output: ``"Hello", "?"`` * - Metaspace - Splits on whitespaces and replaces them with a special char "▁" (U+2581) - Input: ``"Hello there"`` Output: ``"Hello", "▁there"`` * - CharDelimiterSplit - Splits on a given character - Example with ``x``: Input: ``"Helloxthere"`` Output: ``"Hello", "there"`` * - Digits - Splits the numbers from any other characters. - Input: ``"Hello123there"`` Output: ``"Hello", "123", "there"`` * - Split - Versatile pre-tokenizer that splits on the provided pattern and according to the provided behavior. The pattern can be inverted if necessary. - pattern should be either a custom string or regexp. - behavior should be one of: * :entity:`SplitDelimiterBehavior.removed` * :entity:`SplitDelimiterBehavior.isolated` * :entity:`SplitDelimiterBehavior.merged_with_previous` * :entity:`SplitDelimiterBehavior.merged_with_next` * :entity:`SplitDelimiterBehavior.contiguous` - invert should be a boolean flag. - Example with `pattern` = :obj:`" "`, `behavior` = :obj:`"isolated"`, `invert` = :obj:`False`: Input: ``"Hello, how are you?"`` Output: ``"Hello,", " ", "how", " ", "are", " ", "you?"`` * - Sequence - Lets you compose multiple ``PreTokenizer`` that will be run in the given order - :entity:`PreTokenizer.Sequence` ..
_models: Models ---------------------------------------------------------------------------------------------------- Models are the core algorithms used to actually tokenize, and therefore, they are the only mandatory component of a Tokenizer. .. list-table:: :header-rows: 1 * - Name - Description * - WordLevel - This is the "classic" tokenization algorithm. It lets you simply map words to IDs without anything fancy. This has the advantage of being really simple to use and understand, but it requires extremely large vocabularies for good coverage. *Using this* ``Model`` *requires the use of a* ``PreTokenizer``. *No choice will be made by this model directly, it simply maps input tokens to IDs* * - BPE - One of the most popular subword tokenization algorithms. Byte-Pair-Encoding works by starting with characters and merging those that are most frequently seen together, thus creating new tokens. It then works iteratively to build new tokens out of the most frequent pairs it sees in a corpus. BPE is able to build words it has never seen by using multiple subword tokens, and thus requires smaller vocabularies, with fewer chances of having "unk" (unknown) tokens. * - WordPiece - This is a subword tokenization algorithm quite similar to BPE, used mainly by Google in models like BERT. It uses a greedy algorithm that tries to build long words first, splitting into multiple tokens when entire words don't exist in the vocabulary. This is different from BPE, which starts from characters and builds bigger tokens when possible. It uses the famous ``##`` prefix to identify tokens that are part of a word (i.e. not starting a word). * - Unigram - Unigram is also a subword tokenization algorithm, and works by trying to identify the best set of subword tokens to maximize the probability for a given sentence. This is different from BPE in that Unigram is not deterministically based on a set of rules applied sequentially. Instead, Unigram is able to compute multiple ways of tokenizing, while choosing the most probable one. .. _post-processors: PostProcessor ---------------------------------------------------------------------------------------------------- After the whole pipeline, we sometimes want to insert some special tokens before feeding a tokenized string into a model, like "[CLS] My horse is amazing [SEP]". The ``PostProcessor`` is the component doing just that. .. list-table:: :header-rows: 1 * - Name - Description - Example * - TemplateProcessing - Lets you easily template the post-processing, adding special tokens and specifying the ``type_id`` for each sequence/special token. The template is given two strings representing the single sequence and the pair of sequences, as well as a set of special tokens to use. - Example, when specifying a template with these values: - single: ``"[CLS] $A [SEP]"`` - pair: ``"[CLS] $A [SEP] $B [SEP]"`` - special tokens: - ``"[CLS]"`` - ``"[SEP]"`` Input: ``("I like this", "but not this")`` Output: ``"[CLS] I like this [SEP] but not this [SEP]"`` .. _decoders: Decoders ---------------------------------------------------------------------------------------------------- The Decoder knows how to go from the IDs used by the Tokenizer, back to a readable piece of text. Some ``Normalizer`` and ``PreTokenizer`` use special characters or identifiers that need to be reverted, for example. .. list-table:: :header-rows: 1 * - Name - Description * - ByteLevel - Reverts the ByteLevel PreTokenizer.
This PreTokenizer encodes at the byte-level, using a set of visible Unicode characters to represent each byte, so we need a Decoder to revert this process and get something readable again. * - Metaspace - Reverts the Metaspace PreTokenizer. This PreTokenizer uses a special identifier ``▁`` to identify whitespaces, and so this Decoder helps with decoding these. * - WordPiece - Reverts the WordPiece Model. This model uses a special identifier ``##`` for continuing subwords, and so this Decoder helps with decoding these.
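As a brief, illustrative sketch of how these components are attached to a ``Tokenizer`` in the Python bindings (the particular components chosen here are arbitrary examples, not recommendations):

.. code-block:: python

    from tokenizers import Tokenizer, normalizers, pre_tokenizers, decoders
    from tokenizers.models import BPE

    tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
    # Normalizer: several normalizers composed, run in order
    tokenizer.normalizer = normalizers.Sequence([normalizers.NFKC(), normalizers.Lowercase()])
    # Pre-tokenizer: split on whitespace, then isolate digits
    tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
        [pre_tokenizers.Whitespace(), pre_tokenizers.Digits(individual_digits=True)]
    )
    # Decoder: only needed for models/pre-tokenizers that use special markers, e.g. Metaspace
    tokenizer.decoder = decoders.Metaspace()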
tokenizers/docs/source/components.rst/0
{ "file_path": "tokenizers/docs/source/components.rst", "repo_id": "tokenizers", "token_count": 4223 }
<p align="center"> <br> <img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/> <br> <p> <p align="center"> <img alt="Build" src="https://github.com/huggingface/tokenizers/workflows/Rust/badge.svg"> <a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE"> <img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue"> </a> <a href="https://docs.rs/tokenizers/"> <img alt="Doc" src="https://docs.rs/tokenizers/badge.svg"> </a> </p> <br> {{readme}}
tokenizers/tokenizers/README.tpl/0
{ "file_path": "tokenizers/tokenizers/README.tpl", "repo_id": "tokenizers", "token_count": 259 }
pub mod bpe; pub mod byte_fallback; pub mod ctc; pub mod fuse; pub mod sequence; pub mod strip; pub mod wordpiece; // Re-export these as decoders pub use super::pre_tokenizers::byte_level; pub use super::pre_tokenizers::metaspace; use serde::{Deserialize, Deserializer, Serialize}; use crate::decoders::bpe::BPEDecoder; use crate::decoders::byte_fallback::ByteFallback; use crate::decoders::ctc::CTC; use crate::decoders::fuse::Fuse; use crate::decoders::sequence::Sequence; use crate::decoders::strip::Strip; use crate::decoders::wordpiece::WordPiece; use crate::normalizers::replace::Replace; use crate::pre_tokenizers::byte_level::ByteLevel; use crate::pre_tokenizers::metaspace::Metaspace; use crate::{Decoder, Result}; #[derive(Serialize, Clone, Debug)] #[serde(untagged)] pub enum DecoderWrapper { BPE(BPEDecoder), ByteLevel(ByteLevel), WordPiece(WordPiece), Metaspace(Metaspace), CTC(CTC), Sequence(Sequence), Replace(Replace), Fuse(Fuse), Strip(Strip), ByteFallback(ByteFallback), } impl<'de> Deserialize<'de> for DecoderWrapper { fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> where D: Deserializer<'de>, { #[derive(Deserialize)] pub struct Tagged { #[serde(rename = "type")] variant: EnumType, #[serde(flatten)] rest: serde_json::Value, } #[derive(Serialize, Deserialize)] pub enum EnumType { BPEDecoder, ByteLevel, WordPiece, Metaspace, CTC, Sequence, Replace, Fuse, Strip, ByteFallback, } #[derive(Deserialize)] #[serde(untagged)] pub enum DecoderHelper { Tagged(Tagged), Legacy(serde_json::Value), } #[derive(Deserialize)] #[serde(untagged)] pub enum DecoderUntagged { BPE(BPEDecoder), ByteLevel(ByteLevel), WordPiece(WordPiece), Metaspace(Metaspace), CTC(CTC), Sequence(Sequence), Replace(Replace), Fuse(Fuse), Strip(Strip), ByteFallback(ByteFallback), } let helper = DecoderHelper::deserialize(deserializer).expect("Helper"); Ok(match helper { DecoderHelper::Tagged(model) => { let mut values: serde_json::Map<String, serde_json::Value> = serde_json::from_value(model.rest).map_err(serde::de::Error::custom)?; values.insert( "type".to_string(), serde_json::to_value(&model.variant).map_err(serde::de::Error::custom)?, ); let values = serde_json::Value::Object(values); match model.variant { EnumType::BPEDecoder => DecoderWrapper::BPE( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::ByteLevel => DecoderWrapper::ByteLevel( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::WordPiece => DecoderWrapper::WordPiece( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::Metaspace => DecoderWrapper::Metaspace( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::CTC => DecoderWrapper::CTC( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::Sequence => DecoderWrapper::Sequence( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::Replace => DecoderWrapper::Replace( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::Fuse => DecoderWrapper::Fuse( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::Strip => DecoderWrapper::Strip( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), EnumType::ByteFallback => DecoderWrapper::ByteFallback( serde_json::from_value(values).map_err(serde::de::Error::custom)?, ), } } DecoderHelper::Legacy(value) => { let untagged = serde_json::from_value(value).map_err(serde::de::Error::custom)?; match untagged { 
DecoderUntagged::BPE(dec) => DecoderWrapper::BPE(dec), DecoderUntagged::ByteLevel(dec) => DecoderWrapper::ByteLevel(dec), DecoderUntagged::WordPiece(dec) => DecoderWrapper::WordPiece(dec), DecoderUntagged::Metaspace(dec) => DecoderWrapper::Metaspace(dec), DecoderUntagged::CTC(dec) => DecoderWrapper::CTC(dec), DecoderUntagged::Sequence(dec) => DecoderWrapper::Sequence(dec), DecoderUntagged::Replace(dec) => DecoderWrapper::Replace(dec), DecoderUntagged::Fuse(dec) => DecoderWrapper::Fuse(dec), DecoderUntagged::Strip(dec) => DecoderWrapper::Strip(dec), DecoderUntagged::ByteFallback(dec) => DecoderWrapper::ByteFallback(dec), } } }) } } impl Decoder for DecoderWrapper { fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> { match self { Self::BPE(bpe) => bpe.decode_chain(tokens), Self::ByteLevel(bl) => bl.decode_chain(tokens), Self::Metaspace(ms) => ms.decode_chain(tokens), Self::WordPiece(wp) => wp.decode_chain(tokens), Self::CTC(ctc) => ctc.decode_chain(tokens), Self::Sequence(seq) => seq.decode_chain(tokens), Self::Replace(seq) => seq.decode_chain(tokens), Self::ByteFallback(bf) => bf.decode_chain(tokens), Self::Strip(bf) => bf.decode_chain(tokens), Self::Fuse(bf) => bf.decode_chain(tokens), } } } impl_enum_from!(BPEDecoder, DecoderWrapper, BPE); impl_enum_from!(ByteLevel, DecoderWrapper, ByteLevel); impl_enum_from!(ByteFallback, DecoderWrapper, ByteFallback); impl_enum_from!(Fuse, DecoderWrapper, Fuse); impl_enum_from!(Strip, DecoderWrapper, Strip); impl_enum_from!(Metaspace, DecoderWrapper, Metaspace); impl_enum_from!(WordPiece, DecoderWrapper, WordPiece); impl_enum_from!(CTC, DecoderWrapper, CTC); impl_enum_from!(Sequence, DecoderWrapper, Sequence); impl_enum_from!(Replace, DecoderWrapper, Replace); #[cfg(test)] mod tests { use super::*; #[test] fn decoder_serialization() { let oldjson = r#"{"type":"Sequence","decoders":[{"type":"ByteFallback"},{"type":"Metaspace","replacement":"▁","add_prefix_space":true,"prepend_scheme":"always"}]}"#; let olddecoder: DecoderWrapper = serde_json::from_str(oldjson).unwrap(); let oldserialized = serde_json::to_string(&olddecoder).unwrap(); let json = r#"{"type":"Sequence","decoders":[{"type":"ByteFallback"},{"type":"Metaspace","replacement":"▁","prepend_scheme":"always","split":true}]}"#; assert_eq!(oldserialized, json); let decoder: DecoderWrapper = serde_json::from_str(json).unwrap(); let serialized = serde_json::to_string(&decoder).unwrap(); assert_eq!(serialized, json); } #[test] fn decoder_serialization_other_no_arg() { let json = r#"{"type":"Sequence","decoders":[{"type":"Fuse"},{"type":"Metaspace","replacement":"▁","prepend_scheme":"always","split":true}]}"#; let decoder: DecoderWrapper = serde_json::from_str(json).unwrap(); let serialized = serde_json::to_string(&decoder).unwrap(); assert_eq!(serialized, json); } #[test] fn decoder_serialization_no_decode() { let json = r#"{"type":"Sequence","decoders":[{},{"type":"Metaspace","replacement":"▁","prepend_scheme":"always"}]}"#; let parse = serde_json::from_str::<DecoderWrapper>(json); match parse { Err(err) => assert_eq!( format!("{err}"), "data did not match any variant of untagged enum DecoderUntagged" ), _ => panic!("Expected error"), } let json = r#"{"replacement":"▁","prepend_scheme":"always"}"#; let parse = serde_json::from_str::<DecoderWrapper>(json); match parse { Err(err) => assert_eq!( format!("{err}"), "data did not match any variant of untagged enum DecoderUntagged" ), _ => panic!("Expected error"), } let json = r#"{"type":"Sequence","prepend_scheme":"always"}"#; let parse = 
serde_json::from_str::<DecoderWrapper>(json); match parse { Err(err) => assert_eq!(format!("{err}"), "missing field `decoders`"), _ => panic!("Expected error"), } } }
tokenizers/tokenizers/src/decoders/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/decoders/mod.rs", "repo_id": "tokenizers", "token_count": 4660 }
use std::collections::HashMap; use std::hash::Hash; #[derive(Default)] pub struct TrieBuilder<Label> { trie: Trie<Label>, } impl<Label: Eq + Hash + Copy> TrieBuilder<Label> { pub fn push(&mut self, element: &[Label]) { self.trie.push(element); } pub fn build(self) -> Trie<Label> { self.trie } } #[derive(Clone)] pub struct Trie<Label> { root: Node<Label>, } impl<Label: Eq + Hash + Copy> Trie<Label> { pub fn push(&mut self, element: &[Label]) { let mut node = &mut self.root; for label in element.iter() { node = node.children.entry(*label).or_default(); } node.is_leaf = true; } pub fn common_prefix_search<T>(&self, iterator: T) -> TrieIterator<Label, T> where T: Iterator<Item = Label>, { TrieIterator { node: &self.root, prefix: vec![], iterator, } } } pub struct TrieIterator<'a, Label, T> { node: &'a Node<Label>, prefix: Vec<Label>, iterator: T, } impl<Label, T> Iterator for TrieIterator<'_, Label, T> where Label: Eq + Hash + Copy, T: Iterator<Item = Label>, { type Item = Vec<Label>; fn next(&mut self) -> Option<Self::Item> { loop { let label = self.iterator.next()?; self.prefix.push(label); let child = self.node.children.get(&label)?; self.node = child; if self.node.is_leaf { return Some(self.prefix.clone()); } } } } impl<Label> Default for Trie<Label> { fn default() -> Self { Self { root: Node::default(), } } } #[derive(Clone)] pub struct Node<Label> { is_leaf: bool, children: HashMap<Label, Node<Label>>, } impl<Label> Default for Node<Label> { fn default() -> Self { Self { is_leaf: false, children: HashMap::new(), } } }
tokenizers/tokenizers/src/models/unigram/trie.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/unigram/trie.rs", "repo_id": "tokenizers", "token_count": 944 }
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior}; use crate::utils::macro_rules_attribute; use unicode_categories::UnicodeCategories; fn is_bert_punc(x: char) -> bool { char::is_ascii_punctuation(&x) || x.is_punctuation() } #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct BertPreTokenizer; impl PreTokenizer for BertPreTokenizer { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { pretokenized.split(|_, s| s.split(char::is_whitespace, SplitDelimiterBehavior::Removed))?; pretokenized.split(|_, s| s.split(is_bert_punc, SplitDelimiterBehavior::Isolated)) } } #[cfg(test)] mod tests { use super::*; use crate::{NormalizedString, OffsetReferential, OffsetType}; #[test] fn basic() { let pretok = BertPreTokenizer; let mut pretokenized: PreTokenizedString = "Hey friend! How are you?!?".into(); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("Hey", (0, 3)), ("friend", (4, 10)), ("!", (10, 11)), ("How", (16, 19)), ("are", (20, 23)), ("you", (24, 27)), ("?", (27, 28)), ("!", (28, 29)), ("?", (29, 30)), ] ); } #[test] fn chinese_chars() { let mut n = NormalizedString::from("野口里䜳 Noguchi Rika"); n.transform( n.get().to_owned().chars().flat_map(|c| { if (c as usize) > 0x4E00 { vec![(' ', 0), (c, 1), (' ', 1)] } else { vec![(c, 0)] } }), 0, ); let mut pretokenized = n.into(); let pretok = BertPreTokenizer; pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("野", (0, 3)), ("口", (3, 6)), ("里", (6, 9)), ("䜳", (9, 12)), ("Noguchi", (13, 20)), ("Rika", (21, 25)) ] ); } }
tokenizers/tokenizers/src/pre_tokenizers/bert.rs/0
{ "file_path": "tokenizers/tokenizers/src/pre_tokenizers/bert.rs", "repo_id": "tokenizers", "token_count": 1460 }
use crate::processors::PostProcessorWrapper;
use crate::tokenizer::{Encoding, PostProcessor, Result};
use crate::utils::macro_rules_attribute;
use serde::{Deserialize, Serialize};

#[derive(Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Sequence {
    processors: Vec<PostProcessorWrapper>,
}

impl Sequence {
    pub fn new(processors: Vec<PostProcessorWrapper>) -> Self {
        Self { processors }
    }

    pub fn get(&self, index: usize) -> Option<&PostProcessorWrapper> {
        self.processors.get(index)
    }

    pub fn get_mut(&mut self, index: usize) -> Option<&mut PostProcessorWrapper> {
        self.processors.get_mut(index)
    }

    pub fn set_mut(&mut self, index: usize, post_proc: PostProcessorWrapper) {
        self.processors[index] = post_proc;
    }
}

impl AsRef<[PostProcessorWrapper]> for Sequence {
    fn as_ref(&self) -> &[PostProcessorWrapper] {
        &self.processors
    }
}

impl AsMut<[PostProcessorWrapper]> for Sequence {
    fn as_mut(&mut self) -> &mut [PostProcessorWrapper] {
        &mut self.processors
    }
}

impl IntoIterator for Sequence {
    type Item = PostProcessorWrapper;
    type IntoIter = std::vec::IntoIter<Self::Item>;

    fn into_iter(self) -> Self::IntoIter {
        self.processors.into_iter()
    }
}

impl PostProcessor for Sequence {
    fn added_tokens(&self, is_pair: bool) -> usize {
        self.processors
            .iter()
            .map(|p| p.added_tokens(is_pair))
            .sum::<usize>()
    }

    fn process_encodings(
        &self,
        mut encodings: Vec<Encoding>,
        add_special_tokens: bool,
    ) -> Result<Vec<Encoding>> {
        for processor in &self.processors {
            encodings = processor.process_encodings(encodings, add_special_tokens)?;
        }
        Ok(encodings)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::processors::{ByteLevel, PostProcessorWrapper};
    use crate::tokenizer::{Encoding, PostProcessor};
    use std::collections::HashMap;
    use std::iter::FromIterator;

    #[test]
    fn process_chain() {
        let start = Encoding::new(
            vec![0; 5],
            vec![0; 5],
            vec![
                "Ġ".into(),
                "ĠĠĠĠHelloĠĠ".into(),
                "ĠĠHello".into(),
                "HelloĠĠ".into(),
                "ĠĠĠĠ".into(),
            ],
            vec![],
            vec![(0, 1), (0, 11), (11, 18), (18, 25), (25, 29)],
            vec![],
            vec![],
            vec![],
            HashMap::new(),
        );
        let bytelevel = ByteLevel::default().trim_offsets(true);
        let sequence = Sequence::new(vec![PostProcessorWrapper::ByteLevel(bytelevel)]);
        let expected = Encoding::new(
            vec![0; 5],
            vec![0; 5],
            vec![
                "Ġ".into(),
                "ĠĠĠĠHelloĠĠ".into(),
                "ĠĠHello".into(),
                "HelloĠĠ".into(),
                "ĠĠĠĠ".into(),
            ],
            vec![],
            vec![(0, 0), (4, 9), (13, 18), (18, 23), (29, 29)],
            vec![],
            vec![],
            vec![],
            HashMap::from_iter(vec![(0, 0..5)]),
        );

        assert_eq!(
            expected,
            bytelevel.process(start.clone(), None, false).unwrap()
        );
        assert_eq!(
            expected,
            sequence.process(start.clone(), None, false).unwrap()
        );

        let pair_expected = Encoding::new(
            vec![0; 10],
            vec![0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
            vec![
                "Ġ".into(),
                "ĠĠĠĠHelloĠĠ".into(),
                "ĠĠHello".into(),
                "HelloĠĠ".into(),
                "ĠĠĠĠ".into(),
                "Ġ".into(),
                "ĠĠĠĠHelloĠĠ".into(),
                "ĠĠHello".into(),
                "HelloĠĠ".into(),
                "ĠĠĠĠ".into(),
            ],
            vec![],
            vec![
                (0, 0),
                (4, 9),
                (13, 18),
                (18, 23),
                (29, 29),
                (0, 0),
                (4, 9),
                (13, 18),
                (18, 23),
                (29, 29),
            ],
            vec![],
            vec![],
            vec![],
            HashMap::from_iter(vec![(0, 0..5), (1, 5..10)]),
        );
        assert_eq!(
            pair_expected,
            bytelevel
                .process(start.clone(), Some(start.clone()), false)
                .unwrap()
        );
        assert_eq!(
            pair_expected,
            sequence.process(start.clone(), Some(start), false).unwrap()
        );
    }
}
tokenizers/tokenizers/src/processors/sequence.rs/0
{ "file_path": "tokenizers/tokenizers/src/processors/sequence.rs", "repo_id": "tokenizers", "token_count": 2672 }
//! //! This module defines helpers to allow optional Rayon usage. //! use rayon::iter::IterBridge; use rayon::prelude::*; use rayon_cond::CondIterator; use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicU8; use std::sync::atomic::Ordering; // Re-export rayon current_num_threads pub use rayon::current_num_threads; pub const ENV_VARIABLE: &str = "TOKENIZERS_PARALLELISM"; static USED_PARALLELISM: AtomicBool = AtomicBool::new(false); static PARALLELISM: AtomicU8 = AtomicU8::new(0); /// Check if the TOKENIZERS_PARALLELISM env variable has been explicitly set pub fn is_parallelism_configured() -> bool { std::env::var(ENV_VARIABLE).is_ok() || get_override_parallelism().is_some() } /// Check if at some point we used a parallel iterator pub fn has_parallelism_been_used() -> bool { USED_PARALLELISM.load(Ordering::SeqCst) } /// Get internally set parallelism fn get_override_parallelism() -> Option<bool> { match PARALLELISM.load(Ordering::SeqCst) { 0 => None, 1 => Some(false), 2 => Some(true), _ => unreachable!(), } } /// Get the currently set value for `TOKENIZERS_PARALLELISM` env variable fn get_env_parallelism() -> bool { match std::env::var(ENV_VARIABLE) { Ok(mut v) => { v.make_ascii_lowercase(); !matches!(v.as_ref(), "" | "off" | "false" | "f" | "no" | "n" | "0") } Err(_) => true, // If we couldn't get the variable, we use the default } } pub fn get_parallelism() -> bool { if let Some(parallel) = get_override_parallelism() { parallel } else { get_env_parallelism() } } /// Set the value for `TOKENIZERS_PARALLELISM` for the current process pub fn set_parallelism(val: bool) { PARALLELISM.store(if val { 2 } else { 1 }, Ordering::SeqCst); } /// Allows to convert into an iterator that can be executed either parallelly or serially. /// /// The choice is made according to the currently set `TOKENIZERS_PARALLELISM` environment variable. /// This variable can have one of the following values /// - False => "" (empty value), "false", "f", "off", "no", "n", "0" /// - True => Any other value /// pub trait MaybeParallelIterator<P, S> where P: ParallelIterator, S: Iterator<Item = P::Item>, { /// Convert ourself in a CondIterator, that will be executed either in parallel or serially, /// based solely on the `TOKENIZERS_PARALLELISM` environment variable fn into_maybe_par_iter(self) -> CondIterator<P, S>; /// Convert ourself in a CondIterator, that will be executed either in parallel or serially, /// based on both the `TOKENIZERS_PARALLELISM` environment variable and the provided bool. /// Both must be true to run with parallelism activated. 
fn into_maybe_par_iter_cond(self, cond: bool) -> CondIterator<P, S>; } impl<P, S, I> MaybeParallelIterator<P, S> for I where I: IntoParallelIterator<Iter = P, Item = P::Item> + IntoIterator<IntoIter = S, Item = S::Item>, P: ParallelIterator, S: Iterator<Item = P::Item>, { fn into_maybe_par_iter(self) -> CondIterator<P, S> { let parallelism = get_parallelism(); if parallelism { USED_PARALLELISM.store(true, Ordering::SeqCst); } CondIterator::new(self, parallelism) } fn into_maybe_par_iter_cond(self, cond: bool) -> CondIterator<P, S> { if cond { self.into_maybe_par_iter() } else { CondIterator::from_serial(self) } } } /// Shared reference version of MaybeParallelIterator, works the same but returns an iterator /// over references, does not consume self pub trait MaybeParallelRefIterator<'data, P, S> where P: ParallelIterator, S: Iterator<Item = P::Item>, P::Item: 'data, { fn maybe_par_iter(&'data self) -> CondIterator<P, S>; fn maybe_par_iter_cond(&'data self, cond: bool) -> CondIterator<P, S>; } impl<'data, P, S, I: 'data + ?Sized> MaybeParallelRefIterator<'data, P, S> for I where &'data I: MaybeParallelIterator<P, S>, P: ParallelIterator, S: Iterator<Item = P::Item>, P::Item: 'data, { fn maybe_par_iter(&'data self) -> CondIterator<P, S> { self.into_maybe_par_iter() } fn maybe_par_iter_cond(&'data self, cond: bool) -> CondIterator<P, S> { self.into_maybe_par_iter_cond(cond) } } /// Exclusive reference version of MaybeParallelIterator, works the same but returns an iterator /// over mutable references, does not consume self pub trait MaybeParallelRefMutIterator<'data, P, S> where P: ParallelIterator, S: Iterator<Item = P::Item>, P::Item: 'data, { fn maybe_par_iter_mut(&'data mut self) -> CondIterator<P, S>; fn maybe_par_iter_mut_cond(&'data mut self, cond: bool) -> CondIterator<P, S>; } impl<'data, P, S, I: 'data + ?Sized> MaybeParallelRefMutIterator<'data, P, S> for I where &'data mut I: MaybeParallelIterator<P, S>, P: ParallelIterator, S: Iterator<Item = P::Item>, P::Item: 'data, { fn maybe_par_iter_mut(&'data mut self) -> CondIterator<P, S> { self.into_maybe_par_iter() } fn maybe_par_iter_mut_cond(&'data mut self, cond: bool) -> CondIterator<P, S> { self.into_maybe_par_iter_cond(cond) } } /// Converts any serial iterator into a CondIterator, that can either run parallelly or serially. pub trait MaybeParallelBridge<T, S> where S: Iterator<Item = T> + Send, T: Send, { fn maybe_par_bridge(self) -> CondIterator<IterBridge<S>, S>; fn maybe_par_bridge_cond(self, cond: bool) -> CondIterator<IterBridge<S>, S>; } impl<T, S> MaybeParallelBridge<T, S> for S where S: Iterator<Item = T> + Send, T: Send, { fn maybe_par_bridge(self) -> CondIterator<IterBridge<S>, S> { let iter = CondIterator::from_serial(self); if get_parallelism() { USED_PARALLELISM.store(true, Ordering::SeqCst); CondIterator::from_parallel(iter.into_parallel().right().unwrap()) } else { iter } } fn maybe_par_bridge_cond(self, cond: bool) -> CondIterator<IterBridge<S>, S> { if cond { self.maybe_par_bridge() } else { CondIterator::from_serial(self) } } } /// Allows to convert into `chunks` that can be executed either parallelly or serially. 
pub trait MaybeParallelSlice<'data, T> where T: Sync, { /// Create a CondIterator, that will be executed either in parallel or serially, /// based solely on the `TOKENIZERS_PARALLELISM` environment variable fn maybe_par_chunks( &'_ self, chunk_size: usize, ) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>>; /// Create a CondIterator, that will be executed either in parallel or serially, /// based on both the `TOKENIZERS_PARALLELISM` environment variable and the provided bool. /// Both must be true to run with parallelism activated. fn maybe_par_chunks_cond( &'_ self, cond: bool, chunk_size: usize, ) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>>; } impl<T> MaybeParallelSlice<'_, T> for [T] where T: Sync, { fn maybe_par_chunks( &'_ self, chunk_size: usize, ) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>> { let parallelism = get_parallelism(); if parallelism { CondIterator::from_parallel(self.par_chunks(chunk_size)) } else { CondIterator::from_serial(self.chunks(chunk_size)) } } fn maybe_par_chunks_cond( &'_ self, cond: bool, chunk_size: usize, ) -> CondIterator<rayon::slice::Chunks<'_, T>, std::slice::Chunks<'_, T>> { if cond { self.maybe_par_chunks(chunk_size) } else { CondIterator::from_serial(self.chunks(chunk_size)) } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_maybe_parallel_iterator() { let mut v = vec![1u32, 2, 3, 4, 5, 6]; assert_eq!(v.maybe_par_iter().sum::<u32>(), 21); assert_eq!( v.maybe_par_iter_mut() .map(|v| { *v *= 2; *v }) .sum::<u32>(), 42 ); assert_eq!(v.maybe_par_iter().sum::<u32>(), 42); assert_eq!(v.into_maybe_par_iter().sum::<u32>(), 42); } #[test] fn test_maybe_parallel_slice() { let v = [1, 2, 3, 4, 5]; let chunks: Vec<_> = v.maybe_par_chunks(2).collect(); assert_eq!(chunks, vec![&[1, 2][..], &[3, 4], &[5]]); } }
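// A minimal usage sketch of the helpers above; the function name and the
// threshold are illustrative only. The condition-based variants only run in
// parallel when both the global setting (the `TOKENIZERS_PARALLELISM` env
// variable or `set_parallelism`) and the provided condition are true.
#[allow(dead_code)]
fn example_usage(batch: Vec<u32>) -> u32 {
    // Only worth spawning rayon tasks for reasonably large batches.
    let is_large = batch.len() > 1_000;
    batch
        .into_maybe_par_iter_cond(is_large)
        .map(|x| x * 2)
        .sum::<u32>()
}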
tokenizers/tokenizers/src/utils/parallelism.rs/0
{ "file_path": "tokenizers/tokenizers/src/utils/parallelism.rs", "repo_id": "tokenizers", "token_count": 3698 }
To install via [NPM](https://www.npmjs.com/package/@huggingface/transformers), run: ```bash npm i @huggingface/transformers ``` Alternatively, you can use it in vanilla JS, without any bundler, by using a CDN or static hosting. For example, using [ES Modules](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Modules), you can import the library with: ```html <script type="module"> import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/[email protected]'; </script> ```
transformers.js/docs/snippets/2_installation.snippet/0
{ "file_path": "transformers.js/docs/snippets/2_installation.snippet", "repo_id": "transformers.js", "token_count": 176 }
import Chart from 'chart.js/auto'; import Prism from 'prismjs'; // Import code and styles for supported languages import 'prismjs/components/prism-javascript'; import 'prismjs/components/prism-python'; import 'prismjs/components/prism-markdown'; import 'prismjs/components/prism-clike'; import 'prismjs/themes/prism.css' import './theme.css'; import './style.css'; // Initialise worker const worker = new Worker(new URL('./worker.js', import.meta.url), { type: 'module', }); // Define elements const TASK_SELECTOR = document.getElementById('task'); let searchParams = new URLSearchParams(location.search); let defaultDemo = searchParams.get('demo'); if (defaultDemo) { TASK_SELECTOR.value = defaultDemo; } // translation inputs const LANGUAGE_FROM = document.getElementById('language-from'); const LANGUAGE_TO = document.getElementById('language-to'); const INPUT_TEXTBOX = document.getElementById('input-textbox'); const OUTPUT_TEXTBOX = document.getElementById('output-textbox'); // text generation inputs const TEXT_GENERATION_TEXTBOX = document.getElementById('text-generation-textbox'); const TASKS = document.getElementsByClassName('task-settings') const PROGRESS = document.getElementById('progress'); const PROGRESS_BARS = document.getElementById('progress-bars'); const GENERATE_BUTTON = document.getElementById('generate'); const MLM_INPUT_TEXTBOX = document.getElementById('mlm-input-textbox'); const MLM_OUTPUT_TEXTBOX = document.getElementById('mlm-output-textbox'); const SC_INPUT_TEXTBOX = document.getElementById('sc-input-textbox'); const SC_OUTPUT_CANVAS = document.getElementById('sc-canvas'); const TC_INPUT_TEXTBOX = document.getElementById('tc-input-textbox'); const TC_OUTPUT = document.getElementById('tc-output'); const QA_CONTEXT_TEXTBOX = document.getElementById('qa-context-textbox'); const QA_QUESTION_TEXTBOX = document.getElementById('qa-question-textbox'); const QA_ANSWER_TEXTBOX = document.getElementById('qa-answer-textbox'); const SUMMARIZATION_INPUT_TEXTBOX = document.getElementById('summarization-input-textbox'); const SUMMARIZATION_OUTPUT_TEXTBOX = document.getElementById('summarization-output-textbox'); const SPEECH2TEXT_SELECT = document.getElementById('audio-select'); const SPEECH2TEXT_INPUT = document.getElementById('audio-file'); const SPEECH2TEXT_AUDIO = document.getElementById('audio-player'); const SPEECH2TEXT_OUTPUT_TEXTBOX = document.getElementById('speech2text-output-textbox'); const TEXT2IMAGE_SELECT = document.getElementById('image-select'); const TEXT2IMAGE_INPUT = document.getElementById('image-file'); const TEXT2IMAGE_IMG = document.getElementById('image-viewer'); const TEXT2IMAGE_OUTPUT_TEXTBOX = document.getElementById('image2text-output-textbox'); const IMAGE_CLASSIFICATION_SELECT = document.getElementById('ic-select'); const IMAGE_CLASSIFICATION_INPUT = document.getElementById('ic-file'); const IMAGE_CLASSIFICATION_IMG = document.getElementById('ic-viewer'); const IMAGE_CLASSIFICATION_OUTPUT_CANVAS = document.getElementById('ic-canvas'); const CODE_COMPLETION_CONTAINER = document.getElementById('code-completion-container'); const ZSIC_SELECT = document.getElementById('zsic-select'); const ZSIC_INPUT = document.getElementById('zsic-file'); const ZSIC_CLASSES = document.getElementById('zsic-classes'); const ZSIC_IMG = document.getElementById('zsic-viewer'); const ZSIC_OUTPUT_CANVAS = document.getElementById('zsic-canvas'); const OD_SELECT = document.getElementById('od-select'); const OD_INPUT = document.getElementById('od-file'); const OD_IMG = 
document.getElementById('od-viewer'); const OD_OUTPUT_OVERLAY = document.getElementById('od-overlay'); const OD_OUTPUT_CANVAS = document.getElementById('od-canvas'); const ZSC_INPUT_TEXTBOX = document.getElementById('zsc-input-textbox'); const ZSC_CLASSES = document.getElementById('zsc-classes'); const ZSC_OUTPUT_CANVAS = document.getElementById('zsc-canvas'); const DEFAULT_GREEDY_PARAMS = { max_new_tokens: 50, num_beams: 1, temperature: 1, top_k: 0, do_sample: false } const TASK_DEFAULT_PARAMS = { 'translation': DEFAULT_GREEDY_PARAMS, 'text-generation': { max_new_tokens: 100, num_beams: 1, temperature: 1, top_k: 20, do_sample: true }, 'code-completion': DEFAULT_GREEDY_PARAMS, 'masked-language-modelling': { topk: 5 // number of samples }, 'sequence-classification': {}, 'token-classification': {}, 'zero-shot-classification': { multi_label: false }, 'question-answering': {}, 'summarization': { max_new_tokens: 50, num_beams: 2, temperature: 1, top_k: 0, do_sample: false }, 'automatic-speech-recognition': DEFAULT_GREEDY_PARAMS, 'image-to-text': DEFAULT_GREEDY_PARAMS, 'image-classification': {}, 'zero-shot-image-classification': {}, 'object-detection': {}, }; [ [SPEECH2TEXT_SELECT, SPEECH2TEXT_INPUT, SPEECH2TEXT_AUDIO], [TEXT2IMAGE_SELECT, TEXT2IMAGE_INPUT, TEXT2IMAGE_IMG], [IMAGE_CLASSIFICATION_SELECT, IMAGE_CLASSIFICATION_INPUT, IMAGE_CLASSIFICATION_IMG], [ZSIC_SELECT, ZSIC_INPUT, ZSIC_IMG], [OD_SELECT, OD_INPUT, OD_IMG], ].forEach(x => { let [select, input, media] = x; select.addEventListener('input', (e) => { if (select.options[select.selectedIndex].hasAttribute('show-custom')) { input.style.display = 'block'; } else { input.style.display = 'none'; media.src = select.value } }) input.addEventListener("change", () => { const file = input.files[0]; const url = URL.createObjectURL(file); media.src = url; }); }); const NER_TAGS = { // tag: [textColour, backgroundColour, tagColour] 'ORG': ['#115E59', '#CCFBF1', '#14B8A6'], 'PER': ['#9D174D', '#FCE7F3', '#EC4899'], 'LOC': ['#86198F', '#FAE8FF', '#D946EF'], } // Predefined list of unique colours const COLOURS = [ '255, 99, 132', '54, 162, 235', '255, 206, 86', '75, 192, 192', '153, 102, 255', '255, 159, 64', ] OD_SELECT.addEventListener('change', () => { // Clear overlay and chart data on change OD_OUTPUT_OVERLAY.innerHTML = ''; const chart = CHARTS[OD_OUTPUT_CANVAS.id]; chart.data = structuredClone(DEFAULT_DATA); chart.update(); }); OD_OUTPUT_OVERLAY.addEventListener('mousemove', (e) => { let rects = OD_OUTPUT_OVERLAY.querySelectorAll('rect') let colours = []; let borderColours = []; rects.forEach((rect, i) => { let colour = COLOURS[i % COLOURS.length]; // Display if hovering over background (tagName === 'svg') let toDisplay = e.target.tagName !== 'rect'; if (!toDisplay) { // Perform additional check let bb = rect.getBoundingClientRect() // Check if box intersects with current mouse positition toDisplay = e.clientX >= bb.left && e.clientX <= bb.right && e.clientY >= bb.top && e.clientY <= bb.bottom } if (toDisplay) { // Set back to original rect.style.fillOpacity = 0.1; rect.style.opacity = 1; colours.push(`rgba(${colour}, 0.5)`); borderColours.push(`rgba(${colour}, 1)`); } else { // Hovering over a rect, so set all other rects to 0 opacity rect.style.fillOpacity = 0; rect.style.opacity = 0; colours.push(`rgba(${colour}, 0.05)`); borderColours.push(`rgba(${colour}, 0.5)`); } }) const chart = CHARTS['od-canvas']; chart.data.datasets[0].backgroundColor = colours; chart.data.datasets[0].borderColor = borderColours; chart.update(); }) function 
updateParams(task) { let params = TASK_DEFAULT_PARAMS[task] if (!params) return; for (let [key, value] of Object.entries(params)) { let element = document.querySelector(`.generation-option[param-name="${key}"]`) if (!element) continue; element.value = value; } } // Parameters const GENERATION_OPTIONS = document.getElementsByClassName('generation-option'); const CHART_OPTIONS = { responsive: true, maintainAspectRatio: false, indexAxis: 'y', scales: { y: { beginAtZero: true, }, x: { min: 0, max: 1, } }, plugins: { legend: { display: false }, }, layout: { padding: { bottom: -5, } }, }; // Initialise all code blocks const CODE_BLOCKS = {}; [...document.querySelectorAll('.code-container')].forEach(element => { // Guide to add editable code block: // https://codepen.io/WebCoder49/pen/dyNyraq // https://css-tricks.com/creating-an-editable-textarea-that-supports-syntax-highlighted-code/ const CODE_HIGHLIGHT = element.querySelector('pre'); const CODE_HIGHLIGHT_CONTENT = element.querySelector('code'); const CODE_COMPLETION_TEXTBOX = element.querySelector('textarea'); let sync_scroll = () => { /* Scroll result to scroll coords of event - sync with textarea */ CODE_HIGHLIGHT.scrollTop = CODE_COMPLETION_TEXTBOX.scrollTop; CODE_HIGHLIGHT.scrollLeft = CODE_COMPLETION_TEXTBOX.scrollLeft; } let update = (text) => { // Handle final newlines (see article) if (text[text.length - 1] == "\n") { text += " "; } // Update code CODE_HIGHLIGHT_CONTENT.innerHTML = escapeHtml(text); // Syntax Highlight Prism.highlightElement(CODE_HIGHLIGHT_CONTENT); } // Update code function let updateCode = (text) => { update(text); sync_scroll(); }; CODE_BLOCKS[element.id] = { update: (text) => { CODE_COMPLETION_TEXTBOX.value = text; updateCode(text); // When updating, set scroll to bottom // https://stackoverflow.com/a/9170709 CODE_COMPLETION_TEXTBOX.scrollTop = CODE_COMPLETION_TEXTBOX.scrollHeight; }, text: () => CODE_COMPLETION_TEXTBOX.value }; CODE_COMPLETION_TEXTBOX.oninput = () => updateCode(CODE_COMPLETION_TEXTBOX.value); CODE_COMPLETION_TEXTBOX.onscroll = sync_scroll; CODE_COMPLETION_TEXTBOX.onkeydown = (event) => { let code = CODE_COMPLETION_TEXTBOX.value; if (event.key == "Tab") { /* Tab key pressed */ event.preventDefault(); // stop normal let before_tab = code.slice(0, CODE_COMPLETION_TEXTBOX.selectionStart); // text before tab let after_tab = code.slice(CODE_COMPLETION_TEXTBOX.selectionEnd, CODE_COMPLETION_TEXTBOX.value.length); // text after tab let cursor_pos = CODE_COMPLETION_TEXTBOX.selectionStart + 1; // where cursor moves after tab - moving forward by 1 char to after tab CODE_COMPLETION_TEXTBOX.value = before_tab + "\t" + after_tab; // add tab char // move cursor CODE_COMPLETION_TEXTBOX.selectionStart = cursor_pos; CODE_COMPLETION_TEXTBOX.selectionEnd = cursor_pos; update(CODE_COMPLETION_TEXTBOX.value); // Update text to include indent } }; }); const DEFAULT_DATA = { labels: ['label', 'label', 'label', 'label', 'label'], datasets: [{ borderWidth: 1 }] } const CHARTS = { 'sc-canvas': new Chart(SC_OUTPUT_CANVAS, { type: 'bar', data: { labels: ['5 stars', '4 stars', '3 stars', '2 stars', '1 star'], datasets: [{ borderWidth: 1 }] }, options: CHART_OPTIONS, }), 'ic-canvas': new Chart(IMAGE_CLASSIFICATION_OUTPUT_CANVAS, { type: 'bar', data: structuredClone(DEFAULT_DATA), options: CHART_OPTIONS }), 'zsic-canvas': new Chart(ZSIC_OUTPUT_CANVAS, { type: 'bar', data: { labels: ['football', 'airport', 'animals'], datasets: [{ borderWidth: 1 }] }, options: CHART_OPTIONS }), 'od-canvas': new Chart(OD_OUTPUT_CANVAS, { type: 
'bar', data: structuredClone(DEFAULT_DATA), options: CHART_OPTIONS }), 'zsc-canvas': new Chart(ZSC_OUTPUT_CANVAS, { type: 'bar', data: { labels: ['urgent', 'not urgent', 'phone', 'tablet', 'microwave'], datasets: [{ borderWidth: 1 }] }, options: CHART_OPTIONS }), }; [ [ZSIC_CLASSES, ZSIC_OUTPUT_CANVAS], [ZSC_CLASSES, ZSC_OUTPUT_CANVAS], ].forEach(x => { let [input, chart] = x; input.addEventListener('input', () => { // Update labels of graph let chartToUpdate = CHARTS[chart.id]; chartToUpdate.data.labels = getZSClasses(input); chartToUpdate.data.datasets[0].data = new Array(chartToUpdate.data.labels.length).fill(0); chartToUpdate.update(); }) }); function getZSClasses(elem) { // Get zero-shot classes from input element return elem.value.split(/\s*,+\s*/g).filter(x => x); } function updateVisibility() { // Set default parameters for task updateParams(TASK_SELECTOR.value); for (let element of TASKS) { if (element.getAttribute('task').split(',').includes(TASK_SELECTOR.value)) { element.style.display = 'block'; } else { element.style.display = 'none'; } } } updateVisibility(); // Add event listeners TASK_SELECTOR.addEventListener('input', updateVisibility); function parseValue(value, type) { switch (type) { case 'number': return Number(value); case 'bool': return value === 'true' default: return value } } function isVisible(e) { // https://stackoverflow.com/a/38873788 return !!(e.offsetWidth || e.offsetHeight || e.getClientRects().length); } GENERATE_BUTTON.addEventListener('click', async (e) => { // Set and pass generation settings to web worker let data = { task: TASK_SELECTOR.value, generation: Object.fromEntries([...GENERATION_OPTIONS] .filter(isVisible) // Only use parameters that are visible on screen .map(x => { let value = parseValue(x.value, x.getAttribute('datatype')); return [x.getAttribute('param-name'), value] })) }; switch (TASK_SELECTOR.value) { case 'translation': data.languageFrom = LANGUAGE_FROM.value data.languageTo = LANGUAGE_TO.value data.text = INPUT_TEXTBOX.value data.elementIdToUpdate = OUTPUT_TEXTBOX.id break; case 'text-generation': data.text = TEXT_GENERATION_TEXTBOX.value data.elementIdToUpdate = TEXT_GENERATION_TEXTBOX.id break; case 'code-completion': data.text = CODE_BLOCKS[CODE_COMPLETION_CONTAINER.id].text(); data.elementIdToUpdate = CODE_COMPLETION_CONTAINER.id data.targetType = 'code' break; case 'masked-language-modelling': data.text = MLM_INPUT_TEXTBOX.value data.elementIdToUpdate = MLM_OUTPUT_TEXTBOX.id break; case 'sequence-classification': data.text = SC_INPUT_TEXTBOX.value data.elementIdToUpdate = SC_OUTPUT_CANVAS.id data.targetType = 'chart' break; case 'token-classification': data.text = TC_INPUT_TEXTBOX.value data.elementIdToUpdate = TC_OUTPUT.id data.targetType = 'tokens' break; case 'zero-shot-classification': data.text = ZSC_INPUT_TEXTBOX.value data.classes = getZSClasses(ZSC_CLASSES); data.elementIdToUpdate = ZSC_OUTPUT_CANVAS.id data.targetType = 'chart' data.updateLabels = true break; case 'question-answering': data.context = QA_CONTEXT_TEXTBOX.value data.question = QA_QUESTION_TEXTBOX.value data.elementIdToUpdate = QA_ANSWER_TEXTBOX.id break; case 'summarization': data.text = SUMMARIZATION_INPUT_TEXTBOX.value data.elementIdToUpdate = SUMMARIZATION_OUTPUT_TEXTBOX.id break; case 'automatic-speech-recognition': const sampling_rate = 16000; const audioCTX = new AudioContext({ sampleRate: sampling_rate }) const response = await (await fetch(SPEECH2TEXT_AUDIO.currentSrc)).arrayBuffer() const decoded = await audioCTX.decodeAudioData(response) 
data.audio = decoded.getChannelData(0); data.elementIdToUpdate = SPEECH2TEXT_OUTPUT_TEXTBOX.id break; case 'image-to-text': data.image = getImageDataFromImage(TEXT2IMAGE_IMG) data.elementIdToUpdate = TEXT2IMAGE_OUTPUT_TEXTBOX.id break; case 'image-classification': data.image = getImageDataFromImage(IMAGE_CLASSIFICATION_IMG) data.elementIdToUpdate = IMAGE_CLASSIFICATION_OUTPUT_CANVAS.id data.targetType = 'chart' data.updateLabels = true break; case 'zero-shot-image-classification': data.image = getImageDataFromImage(ZSIC_IMG) data.classes = getZSClasses(ZSIC_CLASSES); data.elementIdToUpdate = ZSIC_OUTPUT_CANVAS.id data.targetType = 'chart' data.updateLabels = true break; case 'object-detection': data.image = getImageDataFromImage(OD_IMG) data.targetType = 'overlay' data.chartId = OD_OUTPUT_CANVAS.id data.elementIdToUpdate = OD_OUTPUT_OVERLAY.id break; default: return; } worker.postMessage(data); }); // Handle result returned by the web worker worker.addEventListener('message', (event) => { const message = event.data; switch (message.type) { case 'download': // for session creation if (message.data.status === 'initiate') { PROGRESS.style.display = 'block'; // create progress bar PROGRESS_BARS.appendChild(htmlToElement(` <div class="progress w-100" model="${message.data.name}" file="${message.data.file}"> <div class="progress-bar" role="progressbar"></div> </div> `)); } else { let bar = PROGRESS_BARS.querySelector(`.progress[model="${message.data.name}"][file="${message.data.file}"]> .progress-bar`) switch (message.data.status) { case 'progress': // update existing bar bar.style.width = message.data.progress.toFixed(2) + '%'; bar.textContent = `${message.data.file} (${formatBytes(message.data.loaded)} / ${formatBytes(message.data.total)})`; break; case 'done': // Remove the progress bar bar.parentElement.remove(); break; case 'ready': // Pipeline is ready - hide container PROGRESS.style.display = 'none'; PROGRESS_BARS.innerHTML = ''; break; } } break; case 'update': // for generation let target = message.target; let elem = document.getElementById(target); switch (message.targetType) { case 'code': CODE_BLOCKS[target].update(message.data); break; default: // is textbox elem.value = message.data break; } break; case 'complete': switch (message.targetType) { case 'chart': const chartToUpdate = CHARTS[message.target]; let chartData = chartToUpdate.data.datasets[0].data; if (message.updateLabels) { for (let i = 0; i < message.data.length; ++i) { let item = message.data[i]; chartData[i] = item.score; chartToUpdate.data.labels[i] = item.label; } } else { // set data, ensuring labels align correctly for (let item of message.data) { chartData[ chartToUpdate.data.labels.indexOf(item.label) ] = item.score } } chartToUpdate.update(); // update the chart break; case 'tokens': let target = document.getElementById(message.target); target.innerHTML = ''; let tokens = message.data; for (let token of tokens) { let elem; if (token.type === 'O') { elem = document.createTextNode(token.text); } else { let [textColour, backgroundColour, tagColour] = NER_TAGS[token.type]; elem = htmlToElement(`<span class="ner-container" style="background-color: ${backgroundColour}; color: ${textColour};">${token.text}<span class="ner-tag" style="background-color: ${tagColour}; color: ${backgroundColour};">${token.type}</span></span>`); } target.appendChild(elem); } break; case 'overlay': let parent = document.getElementById(message.target); // Clear previous output, just in case parent.innerHTML = ''; let viewbox = 
parent.viewBox.baseVal; let colours = []; let borderColours = []; let items = message.data; for (let i = 0; i < items.length; ++i) { const box = items[i].box; let svgns = "http://www.w3.org/2000/svg"; let rect = document.createElementNS(svgns, 'rect'); rect.setAttribute('x', viewbox.width * box.xmin); rect.setAttribute('y', viewbox.height * box.ymin); rect.setAttribute('width', viewbox.width * (box.xmax - box.xmin)); rect.setAttribute('height', viewbox.height * (box.ymax - box.ymin)); const colour = COLOURS[i % COLOURS.length]; rect.style.stroke = rect.style.fill = `rgba(${colour}, 1)`; colours.push(`rgba(${colour}, 0.5)`); borderColours.push(`rgba(${colour}, 1)`); parent.appendChild(rect); } // Update chart label and data const chart = CHARTS[message.chartId]; chart.data.labels = items.map(x => x.label); chart.data.datasets[0] = { data: items.map(x => x.score), backgroundColor: colours, borderColor: borderColours }; chart.update() break; default: // is text document.getElementById(message.target).value = message.data break; } break; default: break; } }); // Utility functions function escapeHtml(unsafe) { return unsafe.replaceAll('&', '&amp;').replaceAll('<', '&lt;').replaceAll('>', '&gt;').replaceAll('"', '&quot;').replaceAll("'", '&#039;'); } function htmlToElement(html) { // https://stackoverflow.com/a/35385518 let template = document.createElement('template'); html = html.trim(); // Never return a text node of whitespace as the result template.innerHTML = html; return template.content.firstChild; } function formatBytes(bytes, decimals = 0) { const sizes = ["Bytes", "KB", "MB", "GB", "TB"]; if (bytes === 0) return "0 Bytes"; const i = parseInt(Math.floor(Math.log(bytes) / Math.log(1000)), 10); const rounded = (bytes / Math.pow(1000, i)).toFixed(decimals); return rounded + " " + sizes[i]; } function getImageDataFromImage(original) { // Helper function to get image data from image element const canvas = document.createElement('canvas'); canvas.width = original.naturalWidth; canvas.height = original.naturalHeight; const ctx = canvas.getContext('2d'); // TODO play around with ctx options? // ctx.patternQuality = 'bilinear'; // ctx.quality = 'bilinear'; // ctx.antialias = 'default'; // ctx.imageSmoothingQuality = 'high'; ctx.drawImage(original, 0, 0, canvas.width, canvas.height); return canvas.toDataURL(); }
transformers.js/examples/demo-site/src/main.js/0
{ "file_path": "transformers.js/examples/demo-site/src/main.js", "repo_id": "transformers.js", "token_count": 9224 }
{ "name": "electron", "productName": "electron", "version": "1.0.0", "description": "Transformers.js sample Electron application", "main": "src/index.js", "scripts": { "start": "electron-forge start", "package": "electron-forge package", "make": "electron-forge make", "publish": "electron-forge publish", "lint": "echo \"No linting configured\"" }, "keywords": [], "author": "Xenova", "license": "MIT", "dependencies": { "@xenova/transformers": "^2.6.2", "electron-squirrel-startup": "^1.0.0" }, "devDependencies": { "@electron-forge/cli": "^6.1.1", "@electron-forge/maker-deb": "^6.1.1", "@electron-forge/maker-rpm": "^6.1.1", "@electron-forge/maker-squirrel": "^6.1.1", "@electron-forge/maker-zip": "^6.1.1", "electron": "^24.1.1" } }
transformers.js/examples/electron/package.json/0
{ "file_path": "transformers.js/examples/electron/package.json", "repo_id": "transformers.js", "token_count": 361 }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title>Transformers.js | Sample Browser Extension</title> <!-- Load styles --> <link rel="stylesheet" href="popup.css" /> </head> <body> <div class="container"> <h1>Transformers.js</h1> <h2>Run 🀗 Transformers in a Browser Extension!</h2> <input id="text" placeholder="Enter text here"> <pre id="output"></pre> </div> </body> </html>
transformers.js/examples/extension/src/popup.html/0
{ "file_path": "transformers.js/examples/extension/src/popup.html", "repo_id": "transformers.js", "token_count": 246 }
import { pipeline } from '@xenova/transformers'; import wavefile from 'wavefile'; // Load model let transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en'); // Load audio data let url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav'; let buffer = Buffer.from(await fetch(url).then(x => x.arrayBuffer())) // Read .wav file and convert it to required format let wav = new wavefile.WaveFile(buffer); wav.toBitDepth('32f'); // Pipeline expects input as a Float32Array wav.toSampleRate(16000); // Whisper expects audio with a sampling rate of 16000 let audioData = wav.getSamples(); if (Array.isArray(audioData)) { if (audioData.length > 1) { const SCALING_FACTOR = Math.sqrt(2); // Merge channels (into first channel to save memory) for (let i = 0; i < audioData[0].length; ++i) { audioData[0][i] = SCALING_FACTOR * (audioData[0][i] + audioData[1][i]) / 2; } } // Select first channel audioData = audioData[0]; } // Run model let start = performance.now(); let output = await transcriber(audioData); let end = performance.now(); console.log(`Execution duration: ${(end - start) / 1000} seconds`); console.log(output); // { text: ' And so my fellow Americans ask not what your country can do for you, ask what you can do for your country.' }
transformers.js/examples/node-audio-processing/index.js/0
{ "file_path": "transformers.js/examples/node-audio-processing/index.js", "repo_id": "transformers.js", "token_count": 479 }
import { pipeline } from '@xenova/transformers'; /** * This class uses the Singleton pattern to ensure that only one instance of the * pipeline is loaded. This is because loading the pipeline is an expensive * operation and we don't want to do it every time we want to translate a sentence. */ class MyTranslationPipeline { static task = 'translation'; static model = 'Xenova/nllb-200-distilled-600M'; static instance = null; static async getInstance(progress_callback = null) { if (this.instance === null) { this.instance = pipeline(this.task, this.model, { progress_callback }); } return this.instance; } } // Listen for messages from the main thread self.addEventListener('message', async (event) => { // Retrieve the translation pipeline. When called for the first time, // this will load the pipeline and save it for future use. let translator = await MyTranslationPipeline.getInstance(x => { // We also add a progress callback to the pipeline so that we can // track model loading. self.postMessage(x); }); // Actually perform the translation let output = await translator(event.data.text, { tgt_lang: event.data.tgt_lang, src_lang: event.data.src_lang, // Allows for partial output callback_function: x => { self.postMessage({ status: 'update', output: translator.tokenizer.decode(x[0].output_token_ids, { skip_special_tokens: true }) }); } }); // Send the output back to the main thread self.postMessage({ status: 'complete', output: output, }); });
transformers.js/examples/react-translator/src/worker.js/0
{ "file_path": "transformers.js/examples/react-translator/src/worker.js", "repo_id": "transformers.js", "token_count": 614 }
# Semantic Image Search This example shows you how to use Transformers.js to create a semantic image search engine. Check out the demo [here](https://huggingface.co/spaces/Xenova/semantic-image-search). ![Semantic Image Search Demo](https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/semantic-image-search-min.png) ## Getting Started ### Dataset This application uses images from [The Unsplash Dataset](https://github.com/unsplash/datasets), which you can download [here](https://unsplash.com/data/lite/latest). All you need for this demo is the `photos.tsv000` TSV file, which contains the metadata for all the images. ### Connecting to Supabase After creating a new [Supabase](https://supabase.com/) project, you'll need to: 1. Create an `images` table and import the data from `photos.tsv000`. 2. Add a column for `image_embeddings`: ```sql -- Add a new vector column with a dimension of 512 alter table images add column image_embedding vector(512); ``` 3. Add your `SUPABASE_URL`, `SUPABASE_ANON_KEY`, and `SUPABASE_SECRET_KEY` keys to a `.env.local` file (see `.env.local.example` for template). 4. Update the image embeddings in your database by running the following command: ```bash SUPABASE_URL=your-project-url \ SUPABASE_SECRET_KEY=your-secret-key \ node scripts/update-database.mjs ``` *Note:* This will take a while. Also, since queries are capped at 1000 returned rows, you'll need to run this command multiple times to insert all 25000 rows. 5. Create a new `match_images` [database function](https://supabase.com/docs/guides/database/functions): ```sql -- https://supabase.com/blog/openai-embeddings-postgres-vector create or replace function match_images ( query_embedding vector(512), match_threshold float, match_count int ) returns table ( photo_id text, photo_url text, photo_image_url text, photo_width int, photo_height int, photo_aspect_ratio float, photo_description text, ai_description text, blur_hash text, similarity float ) language sql stable as $$ select photo_id, photo_url, photo_image_url, photo_width, photo_height, photo_aspect_ratio, photo_description, ai_description, blur_hash, 1 - (image_embedding <=> query_embedding) as similarity from images where 1 - (image_embedding <=> query_embedding) > match_threshold order by similarity desc limit match_count; $$; ``` 5. Add a [database policy](https://supabase.com/docs/guides/auth/row-level-security#policies) to allow users to view the database: ```sql create policy "policy_name" on public.images for select using ( true ); ``` ### Development You can now run the development server with: ```bash npm run dev ``` Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
transformers.js/examples/semantic-image-search/README.md/0
{ "file_path": "transformers.js/examples/semantic-image-search/README.md", "repo_id": "transformers.js", "token_count": 1129 }
'use client' import { useState } from 'react' import { Modal } from './components/Modal'; import { SearchBar } from './components/SearchBar'; import { ImageGrid } from './components/ImageGrid'; export default function Home() { // Application state const [images, setImages] = useState(null); const [currentImage, setCurrentImage] = useState(null); const search = async (text) => { if (!text) return; const params = new URLSearchParams(); params.append('text', text); params.append('threshold', 0.1); params.append('limit', 100); // Make a request to the /classify route on the server. const result = await fetch(`/search?${params.toString()}`); const json = await result.json(); setImages(json); }; return ( <main className="mx-auto max-w-[1960px] p-4 relative"> <Modal currentImage={currentImage} setCurrentImage={setCurrentImage} /> <SearchBar search={search} /> <ImageGrid images={images} setCurrentImage={setCurrentImage} /> </main> ) }
transformers.js/examples/semantic-image-search/src/app/page.js/0
{ "file_path": "transformers.js/examples/semantic-image-search/src/app/page.js", "repo_id": "transformers.js", "token_count": 345 }
// Although not strictly necessary, we delegate the tokenization to a worker thread to avoid // any potential issues with the tokenizer blocking the main thread (especially for large inputs). import { env, AutoTokenizer } from '@xenova/transformers' env.allowLocalModels = false; // This is a map of all the tokenizer instances that we have loaded. // model_id -> promise that resolves to tokenizer const TOKENIZER_MAPPINGS = new Map(); // Listen for messages from the main thread self.addEventListener('message', async (event) => { let tokenizerPromise = TOKENIZER_MAPPINGS.get(event.data.model_id); // Load the tokenizer if it hasn't been loaded yet if (!tokenizerPromise) { tokenizerPromise = AutoTokenizer.from_pretrained(event.data.model_id); TOKENIZER_MAPPINGS.set(event.data.model_id, new Promise((resolve) => { // Just for visualization purposes, we may need to modify the tokenizer slightly tokenizerPromise.then((tokenizer) => { // NOTE: We just remove the StripDecoder from the llama tokenizer switch (tokenizer.constructor.name) { case 'LlamaTokenizer': case 'Grok1Tokenizer': // tokenizer.decoder.decoders.at(-1).constructor.name === 'StripDecoder' tokenizer.decoder.decoders.pop(); break; case 'T5Tokenizer': tokenizer.decoder.addPrefixSpace = false; break; } resolve(tokenizer); }); })); } const tokenizer = await tokenizerPromise; const text = event.data.text; const start = performance.now(); const token_ids = tokenizer.encode(text); const end = performance.now(); console.log('[INFO]', `Tokenized ${text.length} characters in ${(end - start).toFixed(2)}ms`) let decoded = token_ids.map(x => tokenizer.decode([x])); let margins = []; // Minor post-processing for visualization purposes switch (tokenizer.constructor.name) { case 'BertTokenizer': margins = decoded.map((x, i) => i === 0 || x.startsWith('##') ? 0 : 8); decoded = decoded.map(x => x.replace('##', '')); break; case 'T5Tokenizer': if (decoded.length > 0 && decoded.length !== ' ') { decoded[0] = decoded[0].replace(/^ /, ''); } break; } // Send the output back to the main thread self.postMessage({ token_ids, decoded, margins }); });
transformers.js/examples/tokenizer-playground/src/worker.js/0
{ "file_path": "transformers.js/examples/tokenizer-playground/src/worker.js", "repo_id": "transformers.js", "token_count": 1112 }
import './style.css'; import { env, AutoModel, AutoProcessor, RawImage } from '@xenova/transformers'; env.backends.onnx.wasm.wasmPaths = 'https://cdn.jsdelivr.net/npm/[email protected]/dist/'; env.backends.onnx.wasm.numThreads = 1; // Reference the elements that we will need const status = document.getElementById('status'); const container = document.getElementById('container'); const canvas = document.getElementById('canvas'); const outputCanvas = document.getElementById('output-canvas'); const video = document.getElementById('video'); const sizeSlider = document.getElementById('size'); const sizeLabel = document.getElementById('size-value'); const scaleSlider = document.getElementById('scale'); const scaleLabel = document.getElementById('scale-value'); function setStreamSize(width, height) { video.width = outputCanvas.width = canvas.width = Math.round(width); video.height = outputCanvas.height = canvas.height = Math.round(height); } status.textContent = 'Loading model...'; // Load model and processor const model_id = 'Xenova/modnet'; let model; try { model = await AutoModel.from_pretrained(model_id, { device: 'webgpu', dtype: 'fp32', // TODO: add fp16 support }); } catch (err) { status.textContent = err.message; alert(err.message) throw err; } const processor = await AutoProcessor.from_pretrained(model_id); // Set up controls let size = 256; processor.feature_extractor.size = { shortest_edge: size }; sizeSlider.addEventListener('input', () => { size = Number(sizeSlider.value); processor.feature_extractor.size = { shortest_edge: size }; sizeLabel.textContent = size; }); sizeSlider.disabled = false; let scale = 0.5; scaleSlider.addEventListener('input', () => { scale = Number(scaleSlider.value); setStreamSize(video.videoWidth * scale, video.videoHeight * scale); scaleLabel.textContent = scale; }); scaleSlider.disabled = false; status.textContent = 'Ready'; let isProcessing = false; let previousTime; const context = canvas.getContext('2d', { willReadFrequently: true }); const outputContext = outputCanvas.getContext('2d', { willReadFrequently: true }); function updateCanvas() { const { width, height } = canvas; if (!isProcessing) { isProcessing = true; (async function () { // Read the current frame from the video context.drawImage(video, 0, 0, width, height); const currentFrame = context.getImageData(0, 0, width, height); const image = new RawImage(currentFrame.data, width, height, 4); // Pre-process image const inputs = await processor(image); // Predict alpha matte const { output } = await model({ input: inputs.pixel_values }); const mask = await RawImage.fromTensor(output[0].mul(255).to('uint8')).resize(width, height); // Update alpha channel const outPixelData = currentFrame; for (let i = 0; i < mask.data.length; ++i) { outPixelData.data[4 * i + 3] = mask.data[i]; } outputContext.putImageData(outPixelData, 0, 0); if (previousTime !== undefined) { const fps = 1000 / (performance.now() - previousTime); status.textContent = `FPS: ${fps.toFixed(2)}`; } previousTime = performance.now(); isProcessing = false; })(); } window.requestAnimationFrame(updateCanvas); } // Start the video stream navigator.mediaDevices.getUserMedia( { video: true }, // Ask for video ).then((stream) => { // Set up the video and canvas elements. 
video.srcObject = stream; video.play(); const videoTrack = stream.getVideoTracks()[0]; const { width, height } = videoTrack.getSettings(); setStreamSize(width * scale, height * scale); // Set container width and height depending on the image aspect ratio const ar = width / height; const [cw, ch] = (ar > 720 / 405) ? [720, 720 / ar] : [405 * ar, 405]; container.style.width = `${cw}px`; container.style.height = `${ch}px`; // Start the animation loop setTimeout(updateCanvas, 50); }).catch((error) => { alert(error); });
transformers.js/examples/webgpu-video-background-removal/main.js/0
{ "file_path": "transformers.js/examples/webgpu-video-background-removal/main.js", "repo_id": "transformers.js", "token_count": 1573 }
import { FEATURE_EXTRACTOR_NAME } from "../utils/constants.js"; import { Callable } from "../utils/generic.js"; import { getModelJSON } from "../utils/hub.js"; /** * Base class for feature extractors. */ export class FeatureExtractor extends Callable { /** * Constructs a new FeatureExtractor instance. * * @param {Object} config The configuration for the feature extractor. */ constructor(config) { super(); this.config = config } /** * Instantiate one of the feature extractor classes of the library from a pretrained model. * * The feature extractor class to instantiate is selected based on the `feature_extractor_type` property of * the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible) * * @param {string} pretrained_model_name_or_path The name or path of the pretrained model. Can be either: * - A string, the *model id* of a pretrained feature_extractor hosted inside a model repo on huggingface.co. * Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a * user or organization name, like `dbmdz/bert-base-german-cased`. * - A path to a *directory* containing feature_extractor files, e.g., `./my_model_directory/`. * @param {import('../utils/hub.js').PretrainedOptions} options Additional options for loading the feature_extractor. * * @returns {Promise<FeatureExtractor>} A new instance of the Feature Extractor class. */ static async from_pretrained(pretrained_model_name_or_path, options) { const config = await getModelJSON(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, true, options); return new this(config); } } /** * Helper function to validate audio inputs. * @param {any} audio The audio data. * @param {string} feature_extractor The name of the feature extractor. * @private */ export function validate_audio_inputs(audio, feature_extractor) { if (!(audio instanceof Float32Array || audio instanceof Float64Array)) { throw new Error( `${feature_extractor} expects input to be a Float32Array or a Float64Array, but got ${audio?.constructor?.name ?? typeof audio} instead. ` + `If using the feature extractor directly, remember to use \`read_audio(url, sampling_rate)\` to obtain the raw audio data of the file/url.` ) } }
transformers.js/src/base/feature_extraction_utils.js/0
{ "file_path": "transformers.js/src/base/feature_extraction_utils.js", "repo_id": "transformers.js", "token_count": 822 }
import { ImageProcessor, } from "../../base/image_processors_utils.js"; import { cat, full, interpolate_4d, slice, stack } from "../../utils/tensor.js"; export class Idefics3ImageProcessor extends ImageProcessor { constructor(config) { super(config); this.do_image_splitting = config.do_image_splitting ?? true; this.max_image_size = config.max_image_size; } /** * @typedef {import('../../utils/image.js').RawImage} RawImage * @typedef {import('../../utils/tensor.js').Tensor} Tensor */ /** * Calculate size to resize images to, to be multiples of `vision_encoder_max_size` while preserving the aspect ratio. * @param {Tensor} pixel_values Tensor of the image to resize. * @param {number} vision_encoder_max_size Maximum size of the output image. If the image is larger than this size, * it will be split into patches of this size, and the original image will be concatenated with the patches, resized to max_size. */ get_resize_for_vision_encoder(pixel_values, vision_encoder_max_size) { let [height, width] = pixel_values.dims.slice(-2); const aspect_ratio = width / height; if (width >= height) { width = Math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size; height = Math.floor(width / aspect_ratio); height = Math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size; } else { height = Math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size; width = Math.floor(height * aspect_ratio); width = Math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size; } return { height, width }; } /** @param {RawImage|RawImage[]|RawImage[][]} images */ async _call(images, { do_image_splitting = null, return_row_col_info = false, } = {}) { /** @type {RawImage[][]} */ let batched_2d_images; if (!Array.isArray(images)) { batched_2d_images = [[images]]; } else { if (images.length === 0 || !images[0]) { throw new Error("No images provided."); } if (!Array.isArray(images[0])) { batched_2d_images = [/** @type {RawImage[]} */(images)]; } else { batched_2d_images = /** @type {RawImage[][]} */(images); } } // List of tensors, each with shape [patches, channels, height, width] let all_pixel_values = []; let images_list_rows = []; let images_list_cols = []; const original_sizes = []; const reshaped_input_sizes = []; for (const image_batch of batched_2d_images) { let images_list = await Promise.all(image_batch.map(x => this.preprocess(x))); // Original sizes of images original_sizes.push(...images_list.map(x => x.original_size)); // Reshaped sizes of images, before padding or cropping reshaped_input_sizes.push(...images_list.map(x => x.reshaped_input_size)); // Convert images to 4D tensors for easier processing images_list.forEach(x => x.pixel_values.unsqueeze_(0)); const { longest_edge } = this.max_image_size; /** @type {Tensor[]} */ let images_tensor; if (do_image_splitting ?? 
this.do_image_splitting) { let image_rows = new Array(images_list.length); let image_cols = new Array(images_list.length); // We first resize both height and width of each image to the nearest max_image_size multiple, disregarding the aspect ratio images_tensor = await Promise.all(images_list.map(async (x, i) => { const new_size = this.get_resize_for_vision_encoder(x.pixel_values, longest_edge); const resized = await interpolate_4d(x.pixel_values, { size: [new_size.height, new_size.width], }); const { frames, num_splits_h, num_splits_w } = await this.split_image(resized, this.max_image_size); image_rows[i] = num_splits_h; image_cols[i] = num_splits_w; return cat(frames, 0); })); images_list_rows.push(image_rows); images_list_cols.push(image_cols); } else { /** @type {[number, number]} */ const size = [longest_edge, longest_edge]; images_tensor = await Promise.all( images_list.map(x => interpolate_4d(x.pixel_values, { size })) ); images_list_rows.push(new Array(images_list.length).fill(0)); images_list_cols.push(new Array(images_list.length).fill(0)); } all_pixel_values.push(cat(images_tensor, 0)); } const batch_size = all_pixel_values.length; const [n, c, h, w] = all_pixel_values[0].dims; // Stack pixel values let pixel_values; let pixel_attention_mask; if (batch_size === 1) { pixel_values = all_pixel_values[0].unsqueeze_(0); pixel_attention_mask = full([batch_size, n, h, w], true); } else { // Add padding (if necessary) to images with less patches than the maximum number of patches const max_num_patches = Math.max(...all_pixel_values.map(x => x.dims.at(0))); pixel_attention_mask = full([batch_size, max_num_patches, h, w], true); const pixel_attention_mask_data = pixel_attention_mask.data; const pixel_attention_mask_stride = max_num_patches * h * w; for (let i = 0; i < batch_size; ++i) { const num_patches = all_pixel_values[i].dims[0]; if (num_patches < max_num_patches) { all_pixel_values[i] = cat([ all_pixel_values[i], full([max_num_patches - num_patches, c, h, w], 0), ], 0); const start_offset = i * pixel_attention_mask_stride + num_patches * h * w; const end_offset = (i + 1) * pixel_attention_mask_stride; // @ts-expect-error pixel_attention_mask_data.fill(false, start_offset, end_offset); } } pixel_values = stack(all_pixel_values, 0); } return { pixel_values, pixel_attention_mask, original_sizes, reshaped_input_sizes, ...( return_row_col_info ? 
{ rows: images_list_rows, cols: images_list_cols } : {} ), } } async split_image(pixel_values, { longest_edge }) { const max_height = longest_edge; const max_width = longest_edge; const frames = []; const [height, width] = pixel_values.dims.slice(-2); let num_splits_h = 0, num_splits_w = 0; if (height > max_height || width > max_width) { // Calculate the number of splits num_splits_h = Math.ceil(height / max_height); num_splits_w = Math.ceil(width / max_width); // Calculate the optimal width and height for the sub-images const optimal_height = Math.ceil(height / num_splits_h); const optimal_width = Math.ceil(width / num_splits_w); // Iterate through each row and column for (let r = 0; r < num_splits_h; ++r) { for (let c = 0; c < num_splits_w; ++c) { let start_x, start_y, end_x, end_y; if (r === num_splits_h - 1) { // At bottom start_y = height - optimal_height; end_y = height; } else { start_y = r * optimal_height; end_y = (r + 1) * optimal_height; } if (c === num_splits_w - 1) { // At right start_x = width - optimal_width; end_x = width; } else { start_x = c * optimal_width; end_x = (c + 1) * optimal_width; } const starts = [start_y, start_x]; const ends = [end_y, end_x]; const patch = await slice(pixel_values, starts, ends, [2, 3]); frames.push(patch); } } // Resize the global image to match max dimensions for memory efficiency const global_image_height = max_height; const global_image_width = max_width; if (height !== global_image_height || width !== global_image_width) { pixel_values = await interpolate_4d(pixel_values, { size: [global_image_height, global_image_width], }) } } frames.push(pixel_values); return { frames, num_splits_h, num_splits_w }; } }
transformers.js/src/models/idefics3/image_processing_idefics3.js/0
{ "file_path": "transformers.js/src/models/idefics3/image_processing_idefics3.js", "repo_id": "transformers.js", "token_count": 4606 }
import { FeatureExtractor, validate_audio_inputs } from '../../base/feature_extraction_utils.js'; import { Tensor } from '../../utils/tensor.js'; export class MoonshineFeatureExtractor extends FeatureExtractor { /** * Asynchronously extracts input values from a given audio using the provided configuration. * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array. * @returns {Promise<{ input_values: Tensor; }>} The extracted input values. */ async _call(audio) { validate_audio_inputs(audio, 'MoonshineFeatureExtractor'); if (audio instanceof Float64Array) { audio = new Float32Array(audio); } const shape = [ 1, /* batch_size */ audio.length, /* num_samples */ ]; return { input_values: new Tensor('float32', audio, shape), }; } }
transformers.js/src/models/moonshine/feature_extraction_moonshine.js/0
{ "file_path": "transformers.js/src/models/moonshine/feature_extraction_moonshine.js", "repo_id": "transformers.js", "token_count": 364 }
import { ImageProcessor, } from "../../base/image_processors_utils.js"; import { calculateDimensions } from "../../utils/core.js"; import { interpolate_4d, Tensor, } from "../../utils/tensor.js"; /** * @typedef {object} SamImageProcessorResult * @property {Tensor} pixel_values * @property {import("../../base/image_processors_utils.js").HeightWidth[]} original_sizes * @property {import("../../base/image_processors_utils.js").HeightWidth[]} reshaped_input_sizes * @property {Tensor} [input_points] * @property {Tensor} [input_labels] * @property {Tensor} [input_boxes] */ export class SamImageProcessor extends ImageProcessor { /** * * @param {any} input_points * @param {import("../../base/image_processors_utils.js").HeightWidth[]} original_sizes * @param {import("../../base/image_processors_utils.js").HeightWidth[]} reshaped_input_sizes * @returns {Tensor} */ reshape_input_points(input_points, original_sizes, reshaped_input_sizes, is_bounding_box = false) { // Make deep copy to avoid altering user's input input_points = structuredClone(input_points); let shape = calculateDimensions(input_points); // TODO: add support for 2D input_points if (shape.length === 3) { // Correct user's input if (!is_bounding_box) { shape = [1, ...shape]; } input_points = [input_points]; } else if (shape.length !== 4) { throw Error("The input_points must be a 4D tensor of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.") } // Reshape input points for (let i = 0; i < input_points.length; ++i) { // batch_size let originalImageSize = original_sizes[i]; let reshapedImageSize = reshaped_input_sizes[i]; let resizeFactors = [ reshapedImageSize[0] / originalImageSize[0], reshapedImageSize[1] / originalImageSize[1] ] for (let j = 0; j < input_points[i].length; ++j) { // point_batch_size for (let k = 0; k < input_points[i][j].length; ++k) { // nb_points_per_image for (let w = 0; w < input_points[i][j][k].length; ++w) { // 2 or 4 input_points[i][j][k][w] *= resizeFactors[w % 2]; } } } } return new Tensor( 'float32', Float32Array.from(input_points.flat(Infinity)), shape ) } /** * * @param {any} input_labels * @param {Tensor} input_points * @returns {Tensor} */ add_input_labels(input_labels, input_points) { let shape = calculateDimensions(input_labels); if (shape.length === 2) { // Correct user's input shape = [1, ...shape]; input_labels = [input_labels]; } else if (shape.length !== 3) { throw Error("The input_points must be a 4D tensor of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.") } if (shape.some((x, i) => x !== input_points.dims[i])) { throw Error(`The first ${shape.length} dimensions of 'input_points' and 'input_labels' must be the same.`) } return new Tensor( 'int64', input_labels.flat(Infinity).map(BigInt), shape, ) } /** * @param {any[]} images The URL(s) of the image(s) to extract features from. * @param {Object} [options] Additional options for the processor. * @param {any} [options.input_points=null] A 3D or 4D array, representing the input points provided by the user. * - 3D: `[point_batch_size, nb_points_per_image, 2]`. In this case, `batch_size` is assumed to be 1. * - 4D: `[batch_size, point_batch_size, nb_points_per_image, 2]`. * @param {any} [options.input_labels=null] A 2D or 3D array, representing the input labels for the points, used by the prompt encoder to encode the prompt. * - 2D: `[point_batch_size, nb_points_per_image]`. In this case, `batch_size` is assumed to be 1. * - 3D: `[batch_size, point_batch_size, nb_points_per_image]`. 
     * @param {number[][][]} [options.input_boxes=null] A 3D array of shape `(batch_size, num_boxes, 4)`, representing the input boxes provided by the user.
     * This is used by the prompt encoder to encode the prompt. Generally yields much better generated masks.
     * The processor will generate a tensor, with each dimension corresponding respectively to the image batch size,
     * the number of boxes per image and the coordinates of the top left and bottom right point of the box.
     * In the order (`x1`, `y1`, `x2`, `y2`):
     * - `x1`: the x coordinate of the top left point of the input box
     * - `y1`: the y coordinate of the top left point of the input box
     * - `x2`: the x coordinate of the bottom right point of the input box
     * - `y2`: the y coordinate of the bottom right point of the input box
     * @returns {Promise<SamImageProcessorResult>}
     */
    async _call(images, {
        input_points = null,
        input_labels = null,
        input_boxes = null
    } = {}) {
        // TODO allow user to use preprocessed images
        /** @type {SamImageProcessorResult} */
        const processed = await super._call(images);

        if (input_points) {
            processed.input_points = this.reshape_input_points(
                input_points, processed.original_sizes, processed.reshaped_input_sizes
            );
        }

        if (input_labels) {
            if (!processed.input_points) {
                throw Error("`input_points` must be provided if `input_labels` are provided.")
            }
            processed.input_labels = this.add_input_labels(input_labels, processed.input_points);
        }

        if (input_boxes) {
            processed.input_boxes = this.reshape_input_points(
                input_boxes, processed.original_sizes, processed.reshaped_input_sizes, true,
            );
        }

        return processed;
    }

    /**
     * Remove padding and upscale masks to the original image size.
     * @param {Tensor} masks Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
     * @param {[number, number][]} original_sizes The original sizes of each image before it was resized to the model's expected input shape, in (height, width) format.
     * @param {[number, number][]} reshaped_input_sizes The size of each image as it is fed to the model, in (height, width) format. Used to remove padding.
     * @param {Object} options Optional parameters for post-processing.
     * @param {number} [options.mask_threshold] The threshold to use for binarizing the masks.
     * @param {boolean} [options.binarize] Whether to binarize the masks.
     * @param {Object} [options.pad_size] The target size the images were padded to before being passed to the model. If `null`, the target size is assumed to be the processor's `pad_size`.
     * @param {number} [options.pad_size.height] The height the images were padded to.
     * @param {number} [options.pad_size.width] The width the images were padded to.
     * @returns {Promise<Tensor[]>} Batched masks in (batch_size, num_channels, height, width) format, where (height, width) is given by original_size.
     */
    async post_process_masks(masks, original_sizes, reshaped_input_sizes, {
        mask_threshold = 0.0,
        binarize = true,
        pad_size = null,
    } = {}) {
        // masks: [1, 1, 3, 256, 256]

        const output_masks = [];

        pad_size = pad_size ??
this.pad_size; /** @type {[number, number]} */ const target_image_size = [pad_size.height, pad_size.width]; for (let i = 0; i < original_sizes.length; ++i) { const original_size = original_sizes[i]; const reshaped_input_size = reshaped_input_sizes[i]; // Upscale mask to padded size let interpolated_mask = (await interpolate_4d( masks[i], { mode: 'bilinear', size: target_image_size } )); // Crop mask interpolated_mask = interpolated_mask.slice(null, null, [0, reshaped_input_size[0]], [0, reshaped_input_size[1]]); // Downscale mask interpolated_mask = (await interpolate_4d( interpolated_mask, { mode: 'bilinear', size: original_size } )); if (binarize) { const data = interpolated_mask.data; const binarizedMaskData = new Uint8Array(data.length); for (let i = 0; i < data.length; ++i) { if (data[i] > mask_threshold) { binarizedMaskData[i] = 1; } } interpolated_mask = new Tensor( 'bool', binarizedMaskData, interpolated_mask.dims ) } output_masks.push(interpolated_mask); } return output_masks; } /** * Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer. * @param {import("../../utils/image.js").RawImage} image Input original image * @param {number} target_size Target size of the resized image * @param {Object} options Options for generating crop boxes * @param {number} [options.crop_n_layers] If >0, mask prediction will be run again on crops of the image. * Sets the number of layers to run, where each layer has 2**i_layer number of image crops. * @param {number} [options.overlap_ratio] Sets the degree to which crops overlap. In the first crop layer, * crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. * @param {number} [options.points_per_crop] Number of points to sample from each crop. * @param {number} [options.crop_n_points_downscale_factor] The number of points-per-side sampled in layer n is * scaled down by crop_n_points_downscale_factor**n. * @returns {Object} An object containing the crop boxes, number of points per crop, cropped images, and input labels. */ generate_crop_boxes(image, target_size, { crop_n_layers = 0, overlap_ratio = 512 / 1500, points_per_crop = 32, crop_n_points_downscale_factor = 1, } = {}) { // TODO: Implement // return { crop_boxes, points_per_crop, cropped_images, input_labels } } }
transformers.js/src/models/sam/image_processing_sam.js/0
{ "file_path": "transformers.js/src/models/sam/image_processing_sam.js", "repo_id": "transformers.js", "token_count": 4498 }
const WHISPER_LANGUAGES = [ ["en", "english"], ["zh", "chinese"], ["de", "german"], ["es", "spanish"], ["ru", "russian"], ["ko", "korean"], ["fr", "french"], ["ja", "japanese"], ["pt", "portuguese"], ["tr", "turkish"], ["pl", "polish"], ["ca", "catalan"], ["nl", "dutch"], ["ar", "arabic"], ["sv", "swedish"], ["it", "italian"], ["id", "indonesian"], ["hi", "hindi"], ["fi", "finnish"], ["vi", "vietnamese"], ["he", "hebrew"], ["uk", "ukrainian"], ["el", "greek"], ["ms", "malay"], ["cs", "czech"], ["ro", "romanian"], ["da", "danish"], ["hu", "hungarian"], ["ta", "tamil"], ["no", "norwegian"], ["th", "thai"], ["ur", "urdu"], ["hr", "croatian"], ["bg", "bulgarian"], ["lt", "lithuanian"], ["la", "latin"], ["mi", "maori"], ["ml", "malayalam"], ["cy", "welsh"], ["sk", "slovak"], ["te", "telugu"], ["fa", "persian"], ["lv", "latvian"], ["bn", "bengali"], ["sr", "serbian"], ["az", "azerbaijani"], ["sl", "slovenian"], ["kn", "kannada"], ["et", "estonian"], ["mk", "macedonian"], ["br", "breton"], ["eu", "basque"], ["is", "icelandic"], ["hy", "armenian"], ["ne", "nepali"], ["mn", "mongolian"], ["bs", "bosnian"], ["kk", "kazakh"], ["sq", "albanian"], ["sw", "swahili"], ["gl", "galician"], ["mr", "marathi"], ["pa", "punjabi"], ["si", "sinhala"], ["km", "khmer"], ["sn", "shona"], ["yo", "yoruba"], ["so", "somali"], ["af", "afrikaans"], ["oc", "occitan"], ["ka", "georgian"], ["be", "belarusian"], ["tg", "tajik"], ["sd", "sindhi"], ["gu", "gujarati"], ["am", "amharic"], ["yi", "yiddish"], ["lo", "lao"], ["uz", "uzbek"], ["fo", "faroese"], ["ht", "haitian creole"], ["ps", "pashto"], ["tk", "turkmen"], ["nn", "nynorsk"], ["mt", "maltese"], ["sa", "sanskrit"], ["lb", "luxembourgish"], ["my", "myanmar"], ["bo", "tibetan"], ["tl", "tagalog"], ["mg", "malagasy"], ["as", "assamese"], ["tt", "tatar"], ["haw", "hawaiian"], ["ln", "lingala"], ["ha", "hausa"], ["ba", "bashkir"], ["jw", "javanese"], ["su", "sundanese"], ] // @ts-ignore export const WHISPER_LANGUAGE_MAPPING = new Map(WHISPER_LANGUAGES); // @ts-ignore export const WHISPER_TO_LANGUAGE_CODE_MAPPING = new Map([ ...WHISPER_LANGUAGES.map(([k, v]) => [v, k]), ...[ ["burmese", "my"], ["valencian", "ca"], ["flemish", "nl"], ["haitian", "ht"], ["letzeburgesch", "lb"], ["pushto", "ps"], ["panjabi", "pa"], ["moldavian", "ro"], ["moldovan", "ro"], ["sinhalese", "si"], ["castilian", "es"], ] ]); /** * @param {string} language The language name or code * @returns {string} The language code */ export function whisper_language_to_code(language) { language = language.toLowerCase(); // Map to code from user-friendly name (e.g., "english" -> "en") let language_code = WHISPER_TO_LANGUAGE_CODE_MAPPING.get(language); if (language_code === undefined) { // User provided something that is not a language name if (WHISPER_LANGUAGE_MAPPING.has(language)) { // User provided the language code directly (e.g., "en") language_code = language; } else { // User provided something that is not a language code or name const is_language_code = language.length === 2; const langs = is_language_code ? WHISPER_LANGUAGE_MAPPING.keys() : WHISPER_LANGUAGE_MAPPING.values(); throw new Error(`Language "${language}" is not supported. Must be one of: ${JSON.stringify(langs)}`); } } return language_code; }
transformers.js/src/models/whisper/common_whisper.js/0
{ "file_path": "transformers.js/src/models/whisper/common_whisper.js", "repo_id": "transformers.js", "token_count": 1848 }
/** * @file Utility functions to interact with the Hugging Face Hub (https://huggingface.co/models) * * @module utils/hub */ import fs from 'fs'; import path from 'path'; import { env } from '../env.js'; import { dispatchCallback } from './core.js'; /** * @typedef {Object} PretrainedOptions Options for loading a pretrained model. * @property {import('./core.js').ProgressCallback} [progress_callback=null] If specified, this function will be called during model construction, to provide the user with progress updates. * @property {import('../configs.js').PretrainedConfig} [config=null] Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when: * - The model is a model provided by the library (loaded with the *model id* string of a pretrained model). * - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory. * @property {string} [cache_dir=null] Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. * @property {boolean} [local_files_only=false] Whether or not to only look at local files (e.g., not try downloading the model). * @property {string} [revision='main'] The specific model version to use. It can be a branch name, a tag name, or a commit id, * since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. * NOTE: This setting is ignored for local requests. */ /** * @typedef {Object} ModelSpecificPretrainedOptions Options for loading a pretrained model. * @property {string} [subfolder='onnx'] In case the relevant files are located inside a subfolder of the model repo on huggingface.co, * you can specify the folder name here. * @property {string} [model_file_name=null] If specified, load the model with this name (excluding the .onnx suffix). Currently only valid for encoder- or decoder-only models. * @property {import("./devices.js").DeviceType|Record<string, import("./devices.js").DeviceType>} [device=null] The device to run the model on. If not specified, the device will be chosen from the environment settings. * @property {import("./dtypes.js").DataType|Record<string, import("./dtypes.js").DataType>} [dtype=null] The data type to use for the model. If not specified, the data type will be chosen from the environment settings. * @property {boolean|Record<string, boolean>} [use_external_data_format=false] Whether to load the model using the external data format (used for models >= 2GB in size). * @property {import('onnxruntime-common').InferenceSession.SessionOptions} [session_options] (Optional) User-specified session options passed to the runtime. If not provided, suitable defaults will be chosen. */ /** * @typedef {PretrainedOptions & ModelSpecificPretrainedOptions} PretrainedModelOptions Options for loading a pretrained model. */ /** * Mapping from file extensions to MIME types. */ const CONTENT_TYPE_MAP = { 'txt': 'text/plain', 'html': 'text/html', 'css': 'text/css', 'js': 'text/javascript', 'json': 'application/json', 'png': 'image/png', 'jpg': 'image/jpeg', 'jpeg': 'image/jpeg', 'gif': 'image/gif', } class FileResponse { /** * Creates a new `FileResponse` object. 
* @param {string|URL} filePath */ constructor(filePath) { this.filePath = filePath; this.headers = new Headers(); this.exists = fs.existsSync(filePath); if (this.exists) { this.status = 200; this.statusText = 'OK'; let stats = fs.statSync(filePath); this.headers.set('content-length', stats.size.toString()); this.updateContentType(); let self = this; this.body = new ReadableStream({ start(controller) { self.arrayBuffer().then(buffer => { controller.enqueue(new Uint8Array(buffer)); controller.close(); }) } }); } else { this.status = 404; this.statusText = 'Not Found'; this.body = null; } } /** * Updates the 'content-type' header property of the response based on the extension of * the file specified by the filePath property of the current object. * @returns {void} */ updateContentType() { // Set content-type header based on file extension const extension = this.filePath.toString().split('.').pop().toLowerCase(); this.headers.set('content-type', CONTENT_TYPE_MAP[extension] ?? 'application/octet-stream'); } /** * Clone the current FileResponse object. * @returns {FileResponse} A new FileResponse object with the same properties as the current object. */ clone() { let response = new FileResponse(this.filePath); response.exists = this.exists; response.status = this.status; response.statusText = this.statusText; response.headers = new Headers(this.headers); return response; } /** * Reads the contents of the file specified by the filePath property and returns a Promise that * resolves with an ArrayBuffer containing the file's contents. * @returns {Promise<ArrayBuffer>} A Promise that resolves with an ArrayBuffer containing the file's contents. * @throws {Error} If the file cannot be read. */ async arrayBuffer() { const data = await fs.promises.readFile(this.filePath); return /** @type {ArrayBuffer} */ (data.buffer); } /** * Reads the contents of the file specified by the filePath property and returns a Promise that * resolves with a Blob containing the file's contents. * @returns {Promise<Blob>} A Promise that resolves with a Blob containing the file's contents. * @throws {Error} If the file cannot be read. */ async blob() { const data = await fs.promises.readFile(this.filePath); return new Blob([data], { type: this.headers.get('content-type') }); } /** * Reads the contents of the file specified by the filePath property and returns a Promise that * resolves with a string containing the file's contents. * @returns {Promise<string>} A Promise that resolves with a string containing the file's contents. * @throws {Error} If the file cannot be read. */ async text() { const data = await fs.promises.readFile(this.filePath, 'utf8'); return data; } /** * Reads the contents of the file specified by the filePath property and returns a Promise that * resolves with a parsed JavaScript object containing the file's contents. * * @returns {Promise<Object>} A Promise that resolves with a parsed JavaScript object containing the file's contents. * @throws {Error} If the file cannot be read. */ async json() { return JSON.parse(await this.text()); } } /** * Determines whether the given string is a valid URL. * @param {string|URL} string The string to test for validity as an URL. * @param {string[]} [protocols=null] A list of valid protocols. If specified, the protocol must be in this list. * @param {string[]} [validHosts=null] A list of valid hostnames. If specified, the URL's hostname must be in this list. * @returns {boolean} True if the string is a valid URL, false otherwise. 
 */
function isValidUrl(string, protocols = null, validHosts = null) {
    let url;
    try {
        url = new URL(string);
    } catch (_) {
        return false;
    }
    if (protocols && !protocols.includes(url.protocol)) {
        return false;
    }
    if (validHosts && !validHosts.includes(url.hostname)) {
        return false;
    }
    return true;
}

/**
 * Helper function to get a file, using either the Fetch API or FileSystem API.
 *
 * @param {URL|string} urlOrPath The URL/path of the file to get.
 * @returns {Promise<FileResponse|Response>} A promise that resolves to a FileResponse object (if the file is retrieved using the FileSystem API), or a Response object (if the file is retrieved using the Fetch API).
 */
export async function getFile(urlOrPath) {

    if (env.useFS && !isValidUrl(urlOrPath, ['http:', 'https:', 'blob:'])) {
        return new FileResponse(urlOrPath);

    } else if (typeof process !== 'undefined' && process?.release?.name === 'node') {
        const IS_CI = !!process.env?.TESTING_REMOTELY;
        const version = env.version;

        const headers = new Headers();
        headers.set('User-Agent', `transformers.js/${version}; is_ci/${IS_CI};`);

        // Check whether we are making a request to the Hugging Face Hub.
        const isHFURL = isValidUrl(urlOrPath, ['http:', 'https:'], ['huggingface.co', 'hf.co']);
        if (isHFURL) {
            // If an access token is present in the environment variables,
            // we add it to the request headers.
            // NOTE: We keep `HF_ACCESS_TOKEN` for backwards compatibility (as a fallback).
            const token = process.env?.HF_TOKEN ?? process.env?.HF_ACCESS_TOKEN;
            if (token) {
                headers.set('Authorization', `Bearer ${token}`);
            }
        }
        return fetch(urlOrPath, { headers });
    } else {
        // Running in a browser environment, so we use default headers
        // NOTE: We do not allow passing authorization headers in the browser,
        // since this would require exposing the token to the client.
        return fetch(urlOrPath);
    }
}

const ERROR_MAPPING = {
    // 4xx errors (https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#client_error_responses)
    400: 'Bad request error occurred while trying to load file',
    401: 'Unauthorized access to file',
    403: 'Forbidden access to file',
    404: 'Could not locate file',
    408: 'Request timeout error occurred while trying to load file',

    // 5xx errors (https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#server_error_responses)
    500: 'Internal server error occurred while trying to load file',
    502: 'Bad gateway error occurred while trying to load file',
    503: 'Service unavailable error occurred while trying to load file',
    504: 'Gateway timeout error occurred while trying to load file',
}

/**
 * Helper method to handle fatal errors that occur while trying to load a file from the Hugging Face Hub.
 * @param {number} status The HTTP status code of the error.
 * @param {string} remoteURL The URL of the file that could not be loaded.
 * @param {boolean} fatal Whether to raise an error if the file could not be loaded.
 * @returns {null} Returns `null` if `fatal = false`.
 * @throws {Error} If `fatal = true`.
 */
function handleError(status, remoteURL, fatal) {
    if (!fatal) {
        // File was not loaded correctly, but it is optional.
        // TODO in future, cache the response?
        return null;
    }

    const message = ERROR_MAPPING[status] ?? `Error (${status}) occurred while trying to load file`;
    throw Error(`${message}: "${remoteURL}".`);
}

class FileCache {
    /**
     * Instantiate a `FileCache` object.
     * @param {string} path
     */
    constructor(path) {
        this.path = path;
    }

    /**
     * Checks whether the given request is in the cache.
* @param {string} request * @returns {Promise<FileResponse | undefined>} */ async match(request) { let filePath = path.join(this.path, request); let file = new FileResponse(filePath); if (file.exists) { return file; } else { return undefined; } } /** * Adds the given response to the cache. * @param {string} request * @param {Response|FileResponse} response * @returns {Promise<void>} */ async put(request, response) { const buffer = Buffer.from(await response.arrayBuffer()); let outputPath = path.join(this.path, request); try { await fs.promises.mkdir(path.dirname(outputPath), { recursive: true }); await fs.promises.writeFile(outputPath, buffer); } catch (err) { console.warn('An error occurred while writing the file to cache:', err) } } // TODO add the rest? // addAll(requests: RequestInfo[]): Promise<void>; // delete(request: RequestInfo | URL, options?: CacheQueryOptions): Promise<boolean>; // keys(request?: RequestInfo | URL, options?: CacheQueryOptions): Promise<ReadonlyArray<Request>>; // match(request: RequestInfo | URL, options?: CacheQueryOptions): Promise<Response | undefined>; // matchAll(request?: RequestInfo | URL, options?: CacheQueryOptions): Promise<ReadonlyArray<Response>>; } /** * * @param {FileCache|Cache} cache The cache to search * @param {string[]} names The names of the item to search for * @returns {Promise<FileResponse|Response|undefined>} The item from the cache, or undefined if not found. */ async function tryCache(cache, ...names) { for (let name of names) { try { let result = await cache.match(name); if (result) return result; } catch (e) { continue; } } return undefined; } /** * * Retrieves a file from either a remote URL using the Fetch API or from the local file system using the FileSystem API. * If the filesystem is available and `env.useCache = true`, the file will be downloaded and cached. * * @param {string} path_or_repo_id This can be either: * - a string, the *model id* of a model repo on huggingface.co. * - a path to a *directory* potentially containing the file. * @param {string} filename The name of the file to locate in `path_or_repo`. * @param {boolean} [fatal=true] Whether to throw an error if the file is not found. * @param {PretrainedOptions} [options] An object containing optional parameters. * * @throws Will throw an error if the file is not found and `fatal` is true. * @returns {Promise<Uint8Array>} A Promise that resolves with the file content as a buffer. */ export async function getModelFile(path_or_repo_id, filename, fatal = true, options = {}) { if (!env.allowLocalModels) { // User has disabled local models, so we just make sure other settings are correct. if (options.local_files_only) { throw Error("Invalid configuration detected: local models are disabled (`env.allowLocalModels=false`) but you have requested to only use local models (`local_files_only=true`).") } else if (!env.allowRemoteModels) { throw Error("Invalid configuration detected: both local and remote models are disabled. 
Fix by setting `env.allowLocalModels` or `env.allowRemoteModels` to `true`.") } } // Initiate file retrieval dispatchCallback(options.progress_callback, { status: 'initiate', name: path_or_repo_id, file: filename }) // First, check if the a caching backend is available // If no caching mechanism available, will download the file every time let cache; if (!cache && env.useBrowserCache) { if (typeof caches === 'undefined') { throw Error('Browser cache is not available in this environment.') } try { // In some cases, the browser cache may be visible, but not accessible due to security restrictions. // For example, when running an application in an iframe, if a user attempts to load the page in // incognito mode, the following error is thrown: `DOMException: Failed to execute 'open' on 'CacheStorage': // An attempt was made to break through the security policy of the user agent.` // So, instead of crashing, we just ignore the error and continue without using the cache. cache = await caches.open('transformers-cache'); } catch (e) { console.warn('An error occurred while opening the browser cache:', e); } } if (!cache && env.useFSCache) { // TODO throw error if not available // If `cache_dir` is not specified, use the default cache directory cache = new FileCache(options.cache_dir ?? env.cacheDir); } if (!cache && env.useCustomCache) { // Allow the user to specify a custom cache system. if (!env.customCache) { throw Error('`env.useCustomCache=true`, but `env.customCache` is not defined.') } // Check that the required methods are defined: if (!env.customCache.match || !env.customCache.put) { throw new Error( "`env.customCache` must be an object which implements the `match` and `put` functions of the Web Cache API. " + "For more information, see https://developer.mozilla.org/en-US/docs/Web/API/Cache" ) } cache = env.customCache; } const revision = options.revision ?? 'main'; let requestURL = pathJoin(path_or_repo_id, filename); let localPath = pathJoin(env.localModelPath, requestURL); let remoteURL = pathJoin( env.remoteHost, env.remotePathTemplate .replaceAll('{model}', path_or_repo_id) .replaceAll('{revision}', encodeURIComponent(revision)), filename ); // Choose cache key for filesystem cache // When using the main revision (default), we use the request URL as the cache key. // If a specific revision is requested, we account for this in the cache key. let fsCacheKey = revision === 'main' ? requestURL : pathJoin(path_or_repo_id, revision, filename); /** @type {string} */ let cacheKey; let proposedCacheKey = cache instanceof FileCache ? fsCacheKey : remoteURL; // Whether to cache the final response in the end. let toCacheResponse = false; /** @type {Response|FileResponse|undefined} */ let response; if (cache) { // A caching system is available, so we try to get the file from it. // 1. We first try to get from cache using the local path. In some environments (like deno), // non-URL cache keys are not allowed. In these cases, `response` will be undefined. // 2. If no response is found, we try to get from cache using the remote URL or file system cache. response = await tryCache(cache, localPath, proposedCacheKey); } const cacheHit = response !== undefined; if (response === undefined) { // Caching not available, or file is not cached, so we perform the request if (env.allowLocalModels) { // Accessing local models is enabled, so we try to get the file locally. // If request is a valid HTTP URL, we skip the local file check. Otherwise, we try to get the file locally. 
const isURL = isValidUrl(requestURL, ['http:', 'https:']); if (!isURL) { try { response = await getFile(localPath); cacheKey = localPath; // Update the cache key to be the local path } catch (e) { // Something went wrong while trying to get the file locally. // NOTE: error handling is done in the next step (since `response` will be undefined) console.warn(`Unable to load from local path "${localPath}": "${e}"`); } } else if (options.local_files_only) { throw new Error(`\`local_files_only=true\`, but attempted to load a remote file from: ${requestURL}.`); } else if (!env.allowRemoteModels) { throw new Error(`\`env.allowRemoteModels=false\`, but attempted to load a remote file from: ${requestURL}.`); } } if (response === undefined || response.status === 404) { // File not found locally. This means either: // - The user has disabled local file access (`env.allowLocalModels=false`) // - the path is a valid HTTP url (`response === undefined`) // - the path is not a valid HTTP url and the file is not present on the file system or local server (`response.status === 404`) if (options.local_files_only || !env.allowRemoteModels) { // User requested local files only, but the file is not found locally. if (fatal) { throw Error(`\`local_files_only=true\` or \`env.allowRemoteModels=false\` and file was not found locally at "${localPath}".`); } else { // File not found, but this file is optional. // TODO in future, cache the response? return null; } } // File not found locally, so we try to download it from the remote server response = await getFile(remoteURL); if (response.status !== 200) { return handleError(response.status, remoteURL, fatal); } // Success! We use the proposed cache key from earlier cacheKey = proposedCacheKey; } // Only cache the response if: toCacheResponse = cache // 1. A caching system is available && typeof Response !== 'undefined' // 2. `Response` is defined (i.e., we are in a browser-like environment) && response instanceof Response // 3. result is a `Response` object (i.e., not a `FileResponse`) && response.status === 200 // 4. request was successful (status code 200) } // Start downloading dispatchCallback(options.progress_callback, { status: 'download', name: path_or_repo_id, file: filename }) /** @type {Uint8Array} */ let buffer; if (!options.progress_callback) { // If no progress callback is specified, we can use the `.arrayBuffer()` // method to read the response. buffer = new Uint8Array(await response.arrayBuffer()); } else if ( cacheHit // The item is being read from the cache && typeof navigator !== 'undefined' && /firefox/i.test(navigator.userAgent) // We are in Firefox ) { // Due to bug in Firefox, we cannot display progress when loading from cache. // Fortunately, since this should be instantaneous, this should not impact users too much. buffer = new Uint8Array(await response.arrayBuffer()); // For completeness, we still fire the final progress callback dispatchCallback(options.progress_callback, { status: 'progress', name: path_or_repo_id, file: filename, progress: 100, loaded: buffer.length, total: buffer.length, }) } else { buffer = await readResponse(response, data => { dispatchCallback(options.progress_callback, { status: 'progress', name: path_or_repo_id, file: filename, ...data, }) }) } if ( // Only cache web responses // i.e., do not cache FileResponses (prevents duplication) toCacheResponse && cacheKey && // Check again whether request is in cache. 
If not, we add the response to the cache (await cache.match(cacheKey) === undefined) ) { // NOTE: We use `new Response(buffer, ...)` instead of `response.clone()` to handle LFS files await cache.put(cacheKey, new Response(buffer, { headers: response.headers })) .catch(err => { // Do not crash if unable to add to cache (e.g., QuotaExceededError). // Rather, log a warning and proceed with execution. console.warn(`Unable to add response to browser cache: ${err}.`); }); } dispatchCallback(options.progress_callback, { status: 'done', name: path_or_repo_id, file: filename }); return buffer; } /** * Fetches a JSON file from a given path and file name. * * @param {string} modelPath The path to the directory containing the file. * @param {string} fileName The name of the file to fetch. * @param {boolean} [fatal=true] Whether to throw an error if the file is not found. * @param {PretrainedOptions} [options] An object containing optional parameters. * @returns {Promise<Object>} The JSON data parsed into a JavaScript object. * @throws Will throw an error if the file is not found and `fatal` is true. */ export async function getModelJSON(modelPath, fileName, fatal = true, options = {}) { let buffer = await getModelFile(modelPath, fileName, fatal, options); if (buffer === null) { // Return empty object return {} } let decoder = new TextDecoder('utf-8'); let jsonData = decoder.decode(buffer); return JSON.parse(jsonData); } /** * Read and track progress when reading a Response object * * @param {Response|FileResponse} response The Response object to read * @param {(data: {progress: number, loaded: number, total: number}) => void} progress_callback The function to call with progress updates * @returns {Promise<Uint8Array>} A Promise that resolves with the Uint8Array buffer */ async function readResponse(response, progress_callback) { const contentLength = response.headers.get('Content-Length'); if (contentLength === null) { console.warn('Unable to determine content-length from response headers. Will expand buffer when needed.') } let total = parseInt(contentLength ?? '0'); let buffer = new Uint8Array(total); let loaded = 0; const reader = response.body.getReader(); async function read() { const { done, value } = await reader.read(); if (done) return; let newLoaded = loaded + value.length; if (newLoaded > total) { total = newLoaded; // Adding the new data will overflow buffer. // In this case, we extend the buffer let newBuffer = new Uint8Array(total); // copy contents newBuffer.set(buffer); buffer = newBuffer; } buffer.set(value, loaded) loaded = newLoaded; const progress = (loaded / total) * 100; // Call your function here progress_callback({ progress: progress, loaded: loaded, total: total, }) return read(); } // Actually read await read(); return buffer; } /** * Joins multiple parts of a path into a single path, while handling leading and trailing slashes. * * @param {...string} parts Multiple parts of a path. * @returns {string} A string representing the joined path. */ function pathJoin(...parts) { // https://stackoverflow.com/a/55142565 parts = parts.map((part, index) => { if (index) { part = part.replace(new RegExp('^/'), ''); } if (index !== parts.length - 1) { part = part.replace(new RegExp('/$'), ''); } return part; }) return parts.join('/'); }
transformers.js/src/utils/hub.js/0
{ "file_path": "transformers.js/src/utils/hub.js", "repo_id": "transformers.js", "token_count": 10053 }
import { AutoImageProcessor, DPTFeatureExtractor, DPTImageProcessor } from "../../../src/transformers.js"; import { load_cached_image } from "../../asset_cache.js"; import { MAX_PROCESSOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js"; export default () => { // DPTFeatureExtractor describe("DPTFeatureExtractor", () => { const model_id = "Xenova/dpt-hybrid-midas"; /** @type {DPTFeatureExtractor} */ let processor; beforeAll(async () => { processor = await AutoImageProcessor.from_pretrained(model_id); }, MAX_PROCESSOR_LOAD_TIME); it( "grayscale images", async () => { const image = await load_cached_image("cats"); const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image); expect(pixel_values.dims).toEqual([1, 3, 384, 384]); expect(pixel_values.mean().item()).toBeCloseTo(0.0372855559389454, 6); expect(original_sizes).toEqual([[480, 640]]); expect(reshaped_input_sizes).toEqual([[384, 384]]); }, MAX_TEST_EXECUTION_TIME, ); }); // DPTImageProcessor // - tests ensure_multiple_of // - tests keep_aspect_ratio // - tests bankers rounding describe("DPTImageProcessor", () => { const model_id = "Xenova/depth-anything-small-hf"; /** @type {DPTImageProcessor} */ let processor; beforeAll(async () => { processor = await AutoImageProcessor.from_pretrained(model_id); }, MAX_PROCESSOR_LOAD_TIME); it( "ensure_multiple_of w/ normal rounding", async () => { const image = await load_cached_image("cats"); const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image); expect(pixel_values.dims).toEqual([1, 3, 518, 686]); expect(pixel_values.mean().item()).toBeCloseTo(0.30337387323379517, 3); expect(original_sizes).toEqual([[480, 640]]); expect(reshaped_input_sizes).toEqual([[518, 686]]); }, MAX_TEST_EXECUTION_TIME, ); it( "ensure_multiple_of w/ bankers rounding", async () => { const image = await load_cached_image("checkerboard_64x32"); const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image); // NOTE: without bankers rounding, this would be [1, 3, 266, 518] expect(pixel_values.dims).toEqual([1, 3, 252, 518]); expect(pixel_values.mean().item()).toBeCloseTo(0.2267402559518814, 1); expect(original_sizes).toEqual([[32, 64]]); expect(reshaped_input_sizes).toEqual([[252, 518]]); }, MAX_TEST_EXECUTION_TIME, ); }); };
transformers.js/tests/models/dpt/test_image_processing_dpt.js/0
{ "file_path": "transformers.js/tests/models/dpt/test_image_processing_dpt.js", "repo_id": "transformers.js", "token_count": 1111 }
import { MgpstrProcessor, MgpstrForSceneTextRecognition } from "../../../src/transformers.js"; import { load_cached_image } from "../../asset_cache.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js"; export default () => { describe("MgpstrForSceneTextRecognition", () => { const model_id = "onnx-community/tiny-random-MgpstrForSceneTextRecognition"; /** @type {MgpstrForSceneTextRecognition} */ let model; /** @type {MgpstrProcessor} */ let processor; beforeAll(async () => { model = await MgpstrForSceneTextRecognition.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); processor = await MgpstrProcessor.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); const TARGETS = { white_image: { generated_text: ["mmmmmmmmmmmmmmmmmmmmmmmmmm"], scores: [3.5553885547065065e-27], char_preds: ["mmmmmmmmmmmmmmmmmmmmmmmmmm"], bpe_preds: ["wwwwwwwwwwwwwwwwwwwwwwwwww"], wp_preds: ["[unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65][unused65]"], }, blue_image: { generated_text: ["11111111111111111111111111"], scores: [9.739909092663214e-32], char_preds: ["11111111111111111111111111"], bpe_preds: ["22222222222222222222222222"], wp_preds: ["[unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59][unused59]"], }, }; it( "batch_size=1", async () => { const image_id = "white_image"; const image = await load_cached_image(image_id); const inputs = await processor(image); const outputs = await model(inputs); const { max_token_length, num_character_labels, num_bpe_labels, num_wordpiece_labels } = model.config; expect(outputs.char_logits.dims).toEqual([1, /* 27 */ max_token_length, /* 38 */ num_character_labels]); expect(outputs.bpe_logits.dims).toEqual([1, /* 27 */ max_token_length, /* 99 */ num_bpe_labels]); expect(outputs.wp_logits.dims).toEqual([1, /* 27 */ max_token_length, /* 99 */ num_wordpiece_labels]); const decoded = processor.batch_decode(outputs.logits); expect(decoded).toBeCloseToNested(TARGETS[image_id]); }, MAX_TEST_EXECUTION_TIME, ); it( "batch_size>1", async () => { const image_ids = ["white_image", "blue_image"]; const images = await Promise.all(image_ids.map((image_id) => load_cached_image(image_id))); const inputs = await processor(images); const outputs = await model(inputs); const { max_token_length, num_character_labels, num_bpe_labels, num_wordpiece_labels } = model.config; expect(outputs.char_logits.dims).toEqual([images.length, /* 27 */ max_token_length, /* 38 */ num_character_labels]); expect(outputs.bpe_logits.dims).toEqual([images.length, /* 27 */ max_token_length, /* 99 */ num_bpe_labels]); expect(outputs.wp_logits.dims).toEqual([images.length, /* 27 */ max_token_length, /* 99 */ num_wordpiece_labels]); const decoded = processor.batch_decode(outputs.logits); const target = image_ids.reduce((acc, image_id) => { for (const key in TARGETS[image_id]) (acc[key] ??= []).push(...TARGETS[image_id][key]); return acc; }, {}); expect(decoded).toBeCloseToNested(target); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); };
transformers.js/tests/models/mgp_str/test_modeling_mgp_str.js/0
{ "file_path": "transformers.js/tests/models/mgp_str/test_modeling_mgp_str.js", "repo_id": "transformers.js", "token_count": 1680 }
import { AutoProcessor, PaliGemmaProcessor } from "../../../src/transformers.js"; import { load_cached_image } from "../../asset_cache.js"; import { MAX_PROCESSOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js"; export default () => { const model_id = "hf-internal-testing/tiny-random-PaliGemmaForConditionalGeneration"; describe("PaliGemmaProcessor", () => { /** @type {PaliGemmaProcessor} */ let processor; let images = {}; beforeAll(async () => { processor = await AutoProcessor.from_pretrained(model_id); images = { white_image: await load_cached_image("white_image"), }; }, MAX_PROCESSOR_LOAD_TIME); it( "Image-only (default text)", async () => { const { input_ids, pixel_values } = await processor(images.white_image); expect(input_ids.dims).toEqual([1, 258]); expect(pixel_values.dims).toEqual([1, 3, 224, 224]); }, MAX_TEST_EXECUTION_TIME, ); it( "Single image & text", async () => { const { input_ids, pixel_values } = await processor(images.white_image, "<image>What is on the flower?"); expect(input_ids.dims).toEqual([1, 264]); expect(pixel_values.dims).toEqual([1, 3, 224, 224]); }, MAX_TEST_EXECUTION_TIME, ); it( "Multiple images & text", async () => { const { input_ids, pixel_values } = await processor([images.white_image, images.white_image], "<image><image>Describe the images."); expect(input_ids.dims).toEqual([1, 518]); expect(pixel_values.dims).toEqual([2, 3, 224, 224]); }, MAX_TEST_EXECUTION_TIME, ); }); };
transformers.js/tests/models/paligemma/test_processor_paligemma.js/0
{ "file_path": "transformers.js/tests/models/paligemma/test_processor_paligemma.js", "repo_id": "transformers.js", "token_count": 709 }
import { pipeline, AutomaticSpeechRecognitionPipeline } from "../../src/transformers.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js"; const PIPELINE_ID = "automatic-speech-recognition"; export default () => { describe("Automatic Speech Recognition", () => { describe("whisper", () => { const model_id = "Xenova/tiny-random-WhisperForConditionalGeneration"; const SAMPLING_RATE = 16000; const audios = [new Float32Array(SAMPLING_RATE).fill(0), Float32Array.from({ length: SAMPLING_RATE }, (_, i) => i / 16000)]; const long_audios = [new Float32Array(SAMPLING_RATE * 60).fill(0), Float32Array.from({ length: SAMPLING_RATE * 60 }, (_, i) => (i % 1000) / 1000)]; const max_new_tokens = 5; /** @type {AutomaticSpeechRecognitionPipeline} */ let pipe; beforeAll(async () => { pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS); }, MAX_MODEL_LOAD_TIME); it("should be an instance of AutomaticSpeechRecognitionPipeline", () => { expect(pipe).toBeInstanceOf(AutomaticSpeechRecognitionPipeline); }); describe("batch_size=1", () => { it( "default", async () => { const output = await pipe(audios[0], { max_new_tokens }); const target = { text: "àž™àž°àž„àž°àž™àž°àž„àž°URURUR" }; expect(output).toEqual(target); }, MAX_TEST_EXECUTION_TIME, ); it( "transcribe w/ return_timestamps=true", async () => { const output = await pipe(audios[0], { return_timestamps: true, max_new_tokens }); const target = { text: " riceUR", chunks: [ { timestamp: [0.72, 17.72], text: " rice" }, { timestamp: [17.72, null], text: "UR" }, ], }; expect(output).toBeCloseToNested(target, 5); }, MAX_TEST_EXECUTION_TIME, ); // TODO add: transcribe w/ return_timestamps="word" // it( // "transcribe w/ word-level timestamps", // async () => { // const output = await pipe(audios[0], { return_timestamps: "word", max_new_tokens }); // const target = []; // expect(output).toBeCloseToNested(target, 5); // }, // MAX_TEST_EXECUTION_TIME, // ); it( "transcribe w/ language", async () => { const output = await pipe(audios[0], { language: "french", task: "transcribe", max_new_tokens }); const target = { text: "àž™àž°àž„àž°àž™àž°àž„àž°URURUR" }; expect(output).toEqual(target); }, MAX_TEST_EXECUTION_TIME, ); it( "translate", async () => { const output = await pipe(audios[0], { language: "french", task: "translate", max_new_tokens }); const target = { text: "àž™àž°àž„àž°àž™àž°àž„àž°URURUR" }; expect(output).toEqual(target); }, MAX_TEST_EXECUTION_TIME, ); it( "audio > 30 seconds", async () => { const output = await pipe(long_audios[0], { chunk_length_s: 30, stride_length_s: 5, max_new_tokens }); const target = { text: "àž™àž°àž„àž°àž™àž°àž„àž°URURUR" }; expect(output).toEqual(target); }, MAX_TEST_EXECUTION_TIME, ); }); afterAll(async () => { await pipe.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); describe("wav2vec2", () => { const model_id = "Xenova/tiny-random-Wav2Vec2ForCTC-ONNX"; const SAMPLING_RATE = 16000; const audios = [new Float32Array(SAMPLING_RATE).fill(0), Float32Array.from({ length: SAMPLING_RATE }, (_, i) => i / 16000)]; const long_audios = [new Float32Array(SAMPLING_RATE * 60).fill(0), Float32Array.from({ length: SAMPLING_RATE * 60 }, (_, i) => (i % 1000) / 1000)]; const max_new_tokens = 5; /** @type {AutomaticSpeechRecognitionPipeline} */ let pipe; beforeAll(async () => { pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS); }, MAX_MODEL_LOAD_TIME); it("should be an instance of AutomaticSpeechRecognitionPipeline", () => { 
expect(pipe).toBeInstanceOf(AutomaticSpeechRecognitionPipeline); }); describe("batch_size=1", () => { it( "default", async () => { const output = await pipe(audios[0], { max_new_tokens }); const target = { text: "<unk>K" }; expect(output).toEqual(target); }, MAX_TEST_EXECUTION_TIME, ); }); afterAll(async () => { await pipe.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); }); };
transformers.js/tests/pipelines/test_pipelines_automatic_speech_recognition.js/0
{ "file_path": "transformers.js/tests/pipelines/test_pipelines_automatic_speech_recognition.js", "repo_id": "transformers.js", "token_count": 2373 }
import { pipeline, TextToAudioPipeline } from "../../src/transformers.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js"; const PIPELINE_ID = "text-to-audio"; export default () => { describe("Text to Audio", () => { const model_id = "Xenova/tiny-random-vits"; /** @type {TextToAudioPipeline} */ let pipe; beforeAll(async () => { pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS); }, MAX_MODEL_LOAD_TIME); it("should be an instance of TextToAudioPipeline", () => { expect(pipe).toBeInstanceOf(TextToAudioPipeline); }); it( "default", async () => { const output = await pipe("hello"); expect(output.audio).toHaveLength(6400); // NOTE: The mean value is not deterministic, so we just check the first few digits expect(output.audio.reduce((a, b) => a + b, 0) / output.audio.length).toBeCloseTo(-0.0125, 2); expect(output.sampling_rate).toEqual(16000); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await pipe.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); };
transformers.js/tests/pipelines/test_pipelines_text_to_audio.js/0
{ "file_path": "transformers.js/tests/pipelines/test_pipelines_text_to_audio.js", "repo_id": "transformers.js", "token_count": 492 }
import { Tensor, cat, stack, layer_norm, ones_like, zeros_like, full_like, rand, std_mean } from "../../src/transformers.js"; import { init } from "../init.js"; import { compare } from "../test_utils.js"; init(); describe("Tensor operations", () => { describe("cat", () => { it("should concatenate on dim=0", () => { const t1 = new Tensor("float32", [1, 2, 3], [1, 3]); const t2 = new Tensor("float32", [4, 5, 6, 7, 8, 9], [2, 3]); const t3 = new Tensor("float32", [10, 11, 12], [1, 3]); const target1 = new Tensor("float32", [1, 2, 3, 4, 5, 6, 7, 8, 9], [3, 3]); const target2 = new Tensor("float32", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [4, 3]); // 2 tensors const concatenated1 = cat([t1, t2], 0); compare(concatenated1, target1, 1e-3); // 3 tensors const concatenated2 = cat([t1, t2, t3], 0); compare(concatenated2, target2, 1e-3); }); it("should concatenate on dim=1", () => { const t1 = new Tensor("float32", [1, 2, 3, -1, -2, -3], [2, 3, 1]); const t2 = new Tensor("float32", [4, -4], [2, 1, 1]); const t3 = new Tensor("float32", [5, 6, -5, -6], [2, 2, 1]); const target1 = new Tensor("float32", [1, 2, 3, 4, -1, -2, -3, -4], [2, 4, 1]); const target2 = new Tensor("float32", [1, 2, 3, 4, 5, 6, -1, -2, -3, -4, -5, -6], [2, 6, 1]); // 2 tensors const concatenated1 = cat([t1, t2], 1); compare(concatenated1, target1, 1e-3); // 3 tensors const concatenated2 = cat([t1, t2, t3], 1); compare(concatenated2, target2, 1e-3); }); it("should concatenate on dim=-2", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16], [2, 1, 3, 2]); const t2 = new Tensor("float32", [7, 8, 9, 10, 17, 18, 19, 20], [2, 1, 2, 2]); const target = new Tensor("float32", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], [2, 1, 5, 2]); const concatenated = cat([t1, t2], -2); compare(concatenated, target, 1e-3); }); // TODO add tests for errors }); describe("slice", () => { it("should return a given row dim", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]); const t2 = t1.slice(1); const target = new Tensor("float32", [3, 4], [2]); compare(t2, target); }); it("should return a range of rows", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]); const t2 = t1.slice([1, 3]); const target = new Tensor("float32", [3, 4, 5, 6], [2, 2]); compare(t2, target); }); it("should return a crop", () => { const t1 = new Tensor( "float32", Array.from({ length: 28 }, (_, i) => i + 1), [4, 7], ); const t2 = t1.slice([1, -1], [1, -1]); const target = new Tensor("float32", [9, 10, 11, 12, 13, 16, 17, 18, 19, 20], [2, 5]); compare(t2, target); }); }); describe("stack", () => { const t1 = new Tensor("float32", [0, 1, 2, 3, 4, 5], [1, 3, 2]); it("should stack on dim=0", () => { const target1 = new Tensor("float32", [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5], [2, 1, 3, 2]); const target2 = new Tensor("float32", [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5], [3, 1, 3, 2]); // 2 tensors const stacked1 = stack([t1, t1], 0); compare(stacked1, target1, 1e-3); // 3 tensors const stacked2 = stack([t1, t1, t1], 0); compare(stacked2, target2, 1e-3); }); it("should stack on dim=1", () => { const target1 = new Tensor("float32", [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5], [1, 2, 3, 2]); const target2 = new Tensor("float32", [0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5], [1, 3, 3, 2]); // 2 tensors const stacked1 = stack([t1, t1], 1); compare(stacked1, target1, 1e-3); // 3 tensors const stacked2 = stack([t1, t1, t1], 1); compare(stacked2, target2, 1e-3); }); it("should stack on 
dim=-1", () => { const target1 = new Tensor("float32", [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5], [1, 3, 2, 2]); const target2 = new Tensor("float32", [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5], [1, 3, 2, 3]); // 2 tensors const stacked1 = stack([t1, t1], -1); compare(stacked1, target1, 1e-3); // 3 tensors const stacked2 = stack([t1, t1, t1], -1); compare(stacked2, target2, 1e-3); }); }); describe("permute", () => { it("should permute", () => { const x = new Tensor("float32", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], [2, 3, 4]); // Permute axes to (0, 1, 2) - No change const permuted_1 = x.permute(0, 1, 2); const target_1 = x; compare(permuted_1, target_1, 1e-3); // Permute axes to (0, 2, 1) const permuted_2 = x.permute(0, 2, 1); const target_2 = new Tensor("float32", [0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11, 12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23], [2, 4, 3]); compare(permuted_2, target_2, 1e-3); // Permute axes to (1, 0, 2) const permuted_3 = x.permute(1, 0, 2); const target_3 = new Tensor("float32", [0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7, 16, 17, 18, 19, 8, 9, 10, 11, 20, 21, 22, 23], [3, 2, 4]); compare(permuted_3, target_3, 1e-3); // Permute axes to (1, 2, 0) const permuted_4 = x.permute(1, 2, 0); const target_4 = new Tensor("float32", [0, 12, 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23], [3, 4, 2]); compare(permuted_4, target_4, 1e-3); // Permute axes to (2, 0, 1) const permuted_5 = x.permute(2, 0, 1); const target_5 = new Tensor("float32", [0, 4, 8, 12, 16, 20, 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23], [4, 2, 3]); compare(permuted_5, target_5, 1e-3); // Permute axes to (2, 1, 0) const permuted_6 = x.permute(2, 1, 0); const target_6 = new Tensor("float32", [0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21, 2, 14, 6, 18, 10, 22, 3, 15, 7, 19, 11, 23], [4, 3, 2]); compare(permuted_6, target_6, 1e-3); }); }); describe("map", () => { it("should double", () => { const original = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("float32", [2, 4, 6, 8, 10, 12], [2, 3]); const doubled = original.map((x) => x * 2); compare(doubled, target, 1e-3); }); }); describe("mean", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3, 1]); it("should calculate mean over the entire tensor", () => { const target = new Tensor("float32", [3.5], []); compare(t1.mean(), target, 1e-3); }); it("should calculate mean over dimension 0", () => { const target0 = new Tensor("float32", [2.5, 3.5, 4.5], [3, 1]); compare(t1.mean(0), target0, 1e-3); }); it("should calculate mean over dimension 1", () => { const target1 = new Tensor("float32", [2, 5], [2, 1]); compare(t1.mean(1), target1, 1e-3); }); it("should calculate mean over dimension -1", () => { const target2 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); compare(t1.mean(-1), target2, 1e-3); }); }); describe("std_mean", () => { it("should return std_mean for the entire tensor", () => { const t = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const [stdVal, meanVal] = std_mean(t); compare(stdVal, new Tensor("float32", [1.8708287477493286], []), 1e-3); compare(meanVal, new Tensor("float32", [3.5], []), 1e-3); }); }); describe("min", () => { it("should return the minimum over the entire tensor", () => { const t1 = new Tensor("float32", [3, -2, 5, 0], [2, 2]); const target = new Tensor("float32", [-2], []); const result = t1.min(); compare(result, target, 1e-3); }); it("should return the minimum over dimension 
1", () => { const t2 = new Tensor("float32", [4, 2, -1, 0, 6, 5], [3, 2]); const target = new Tensor("float32", [2, -1, 5], [3]); const result = t2.min(1); compare(result, target, 1e-3); }); }); describe("max", () => { it("should return the maximum over the entire tensor", () => { const t1 = new Tensor("float32", [3, 10, -2, 7], [2, 2]); const target = new Tensor("float32", [10], []); const result = t1.max(); compare(result, target, 1e-3); }); it("should return the maximum over dimension 0", () => { const t2 = new Tensor("float32", [1, 2, 4, 5, 9, 3], [3, 2]); const target = new Tensor("float32", [9, 5], [2]); const result = t2.max(0); compare(result, target, 1e-3); }); }); describe("sum", () => { it("should calculate sum over entire tensor", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("float32", [21], []); const result = t1.sum(); compare(result, target, 1e-3); }); it("should calculate sum over dimension 0", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("float32", [5, 7, 9], [3]); const result = t1.sum(0); compare(result, target, 1e-3); }); it("should calculate sum over dimension 1", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("float32", [6, 15], [2]); const result = t1.sum(1); compare(result, target, 1e-3); }); }); describe("norm", () => { it("should calculate L2 norm over entire tensor", () => { const t1 = new Tensor("float32", [3, 4], [2]); const target = new Tensor("float32", [5], []); const result = t1.norm(); compare(result, target, 1e-3); }); it("should calculate L2 norm over dimension 0", () => { const t1 = new Tensor("float32", [3, 4, 6, 8], [2, 2]); const target = new Tensor("float32", [6.7082, 8.9443], [2]); const result = t1.norm(2, 0); compare(result, target, 1e-2); }); }); describe("normalize", () => { it("should normalize a vector correctly", () => { const t1 = new Tensor("float32", [3, 4], [1, 2]); const target = new Tensor("float32", [0.6, 0.8], [1, 2]); const normalized = t1.normalize(); compare(normalized, target, 1e-3); }); it("should normalize along dimension", () => { const t1 = new Tensor("float32", [1, 2, 2, 3], [2, 2]); const target = new Tensor("float32", [0.4472, 0.8944, 0.5547, 0.8321], [2, 2]); const normalized = t1.normalize(); compare(normalized, target, 1e-3); }); }); describe("layer_norm", () => { it("should calculate layer norm", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("float32", [-1.2247356176376343, 0.0, 1.2247356176376343, -1.2247357368469238, -1.1920928955078125e-7, 1.2247354984283447], [2, 3]); const norm = layer_norm(t1, [t1.dims.at(-1)]); compare(norm, target, 1e-3); }); }); describe("sigmoid", () => { it("should apply the sigmoid function to each element in the tensor", () => { const t1 = new Tensor("float32", [0, 1, -1, 5, -5], [5]); const target = new Tensor("float32", [0.5, 1 / (1 + Math.exp(-1)), 1 / (1 + Math.exp(1)), 1 / (1 + Math.exp(-5)), 1 / (1 + Math.exp(5))], [5]); const result = t1.sigmoid(); compare(result, target, 1e-3); }); }); describe("tolist", () => { it("should return nested arrays for a 2D tensor", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]); const arr = t1.tolist(); compare(arr, [ [1, 2], [3, 4], ]); }); }); describe("mul", () => { it("should multiply constant", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]); const target = new Tensor("float32", [2, 4, 6, 8], [2, 2]); const result = 
t1.mul(2); compare(result, target, 1e-3); }); }); describe("div", () => { it("should divide constant", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]); const target = new Tensor("float32", [0.5, 1, 1.5, 2], [2, 2]); const result = t1.div(2); compare(result, target, 1e-3); }); }); describe("add", () => { it("should add constant", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]); const target = new Tensor("float32", [3, 4, 5, 6], [2, 2]); const result = t1.add(2); compare(result, target, 1e-3); }); }); describe("sub", () => { it("should subtract constant", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]); const target = new Tensor("float32", [-1, 0, 1, 2], [2, 2]); const result = t1.sub(2); compare(result, target, 1e-3); }); }); describe("gt", () => { it("should perform element-wise greater than comparison with a scalar", () => { const t1 = new Tensor("float32", [1, 5, 3, 7], [4]); const target = new Tensor("bool", [0, 1, 0, 1], [4]); const result = t1.gt(4); compare(result, target, 1e-3); }); }); describe("lt", () => { it("should perform element-wise less than comparison with a scalar", () => { const t1 = new Tensor("float32", [1, 5, 3, 7], [4]); const target = new Tensor("bool", [1, 0, 1, 0], [4]); const result = t1.lt(4); compare(result, target, 1e-3); }); }); describe("squeeze", () => { it("should remove all dimensions of size 1", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [1, 4]); const target = new Tensor("float32", [1, 2, 3, 4], [4]); const result = t1.squeeze(); compare(result, target, 1e-3); }); it("should remove a specified dimension", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [1, 1, 2, 2]); const result = t1.squeeze(1); const target = new Tensor("float32", [1, 2, 3, 4], [1, 2, 2]); compare(result, target, 1e-3); }); it("should remove multiple dimensions", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [1, 1, 2, 1, 2]); const result = t1.squeeze([0, 3]); const target = new Tensor("float32", [1, 2, 3, 4], [1, 2, 2]); compare(result, target, 1e-3); }); }); describe("unsqueeze", () => { it("should add a dimension at the specified axis", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [4]); const target = new Tensor("float32", [1, 2, 3, 4], [1, 4]); const result = t1.unsqueeze(0); compare(result, target, 1e-3); }); }); describe("flatten", () => { it("should flatten a 2D tensor into 1D by default", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("float32", [1, 2, 3, 4, 5, 6], [6]); const result = t1.flatten(); compare(result, target, 1e-3); }); }); describe("neg", () => { it("should compute the negative of each element in the tensor", () => { const t1 = new Tensor("float32", [1, -2, 0, 3], [4]); const target = new Tensor("float32", [-1, 2, -0, -3], [4]); const result = t1.neg(); compare(result, target, 1e-3); }); }); describe("view", () => { it("should reshape the tensor to the specified dimensions", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("float32", [1, 2, 3, 4, 5, 6], [3, 2]); const result = t1.view(3, 2); compare(result, target, 1e-3); }); it("should reshape the tensor with an inferred dimension (-1)", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("float32", [1, 2, 3, 4, 5, 6], [1, 6]); const result = t1.view(1, -1); compare(result, target, 1e-3); }); it("should throw if multiple inferred dimensions are used", () => { const t1 = new Tensor("float32", 
[1, 2, 3, 4, 5, 6], [2, 3]); expect(() => t1.view(-1, -1)).toThrow(); }); }); describe("clamp", () => { it("should clamp values between min and max", () => { const t1 = new Tensor("float32", [-2, -1, 0, 1, 2, 3], [6]); const target = new Tensor("float32", [-1, -1, 0, 1, 2, 2], [6]); const result = t1.clamp(-1, 2); compare(result, target, 1e-3); }); }); describe("round", () => { it("should round elements to the nearest integer", () => { const t1 = new Tensor("float32", [0.1, 1.4, 2.5, 3.9, -1.2], [5]); const target = new Tensor("float32", [0, 1, 3, 4, -1], [5]); const result = t1.round(); compare(result, target, 1e-3); }); }); describe("ones_like", () => { it("should create a tensor of all ones with the same shape as the input", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]); const result = ones_like(t1); const target = new Tensor("int64", [1n, 1n, 1n, 1n], [2, 2]); compare(result, target, 1e-3); }); }); describe("zeros_like", () => { it("should create a tensor of all zeros with the same shape as the input", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]); const result = zeros_like(t1); const target = new Tensor("int64", [0n, 0n, 0n, 0n], [2, 2]); compare(result, target, 1e-3); }); }); describe("full_like", () => { it("should create a tensor filled with a number, matching the shape of the original", () => { const t1 = new Tensor("float32", [1, 2, 3, 4], [2, 2]); const result = full_like(t1, 10); const target = new Tensor("float32", [10, 10, 10, 10], [2, 2]); compare(result, target, 1e-3); }); it("should create a boolean tensor with the same shape", () => { const t2 = new Tensor("bool", [true, false], [2]); const result = full_like(t2, true); const target = new Tensor("bool", [true, true], [2]); compare(result, target, 1e-3); }); it("should create a bigint tensor with the same shape", () => { const t3 = new Tensor("int64", [1n, 2n], [2]); const result = full_like(t3, 123n); const target = new Tensor("int64", [123n, 123n], [2]); compare(result, target, 1e-3); }); }); describe("rand", () => { it("should create a tensor of random values between 0 and 1 with the given shape", () => { const shape = [2, 2]; const random = rand(shape); expect(random.type).toBe("float32"); expect(random.dims).toEqual(shape); random.data.forEach((val) => { expect(val).toBeGreaterThanOrEqual(0); expect(val).toBeLessThan(1); }); }); }); describe("to", () => { it("float32 to int32 (number to number)", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("int32", [1, 2, 3, 4, 5, 6], [2, 3]); const t2 = t1.to("int32"); compare(t2, target); }); it("float32 to int64 (number to bigint)", () => { const t1 = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const target = new Tensor("int64", [1n, 2n, 3n, 4n, 5n, 6n], [2, 3]); const t2 = t1.to("int64"); compare(t2, target); }); it("int64 to float32 (bigint to number)", () => { const t1 = new Tensor("int64", [1n, 2n, 3n, 4n, 5n, 6n], [2, 3]); const target = new Tensor("float32", [1, 2, 3, 4, 5, 6], [2, 3]); const t2 = t1.to("float32"); compare(t2, target); }); it("int32 to uint32", () => { const t1 = new Tensor("int32", [-1, 2, -3, 4, -5, 6], [2, 3]); const target = new Tensor("uint32", [4294967295, 2, 4294967293, 4, 4294967291, 6], [2, 3]); const t2 = t1.to("uint32"); compare(t2, target); }); it("int16 to int8 (overflow)", () => { const t1 = new Tensor("int16", [0, 1, 128, 256, 257, 512], [2, 3]); const target = new Tensor("int8", [0, 1, -128, 0, 1, 0], [2, 3]); const t2 = t1.to("int8"); compare(t2, 
target); }); }); });
transformers.js/tests/utils/tensor.test.js/0
{ "file_path": "transformers.js/tests/utils/tensor.test.js", "repo_id": "transformers.js", "token_count": 9122 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Run benchmark using the `optimum-benchmark` library with some customization in `transformers`. Assume we are under `transformers` root directory: (make sure the commits are valid commits) ```bash python benchmark/benchmark.py --config-dir benchmark/config --config-name generation --commit=9b9c7f03da625b13643e99205c691fe046461724 --metrics=decode.latency.mean,per_token.latency.mean,per_token.throughput.value backend.model=google/gemma-2b benchmark.input_shapes.sequence_length=5,7 benchmark.input_shapes.batch_size=1,2 --multirun ``` """ import argparse import glob import json import os.path import re import tempfile from contextlib import contextmanager from pathlib import Path from git import Repo from huggingface_hub import HfApi from optimum_benchmark import Benchmark from optimum_benchmark_wrapper import main PATH_TO_REPO = Path(__file__).parent.parent.resolve() @contextmanager def checkout_commit(repo: Repo, commit_id: str): """ Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit. Args: repo (`git.Repo`): A git repository (for instance the Transformers repo). commit_id (`str`): The commit reference to checkout inside the context manager. """ current_head = repo.head.commit if repo.head.is_detached else repo.head.ref try: repo.git.checkout(commit_id) yield finally: repo.git.checkout(current_head) def summarize(run_dir, metrics, expand_metrics=False): """Produce a summary for each optimum-benchmark launched job's output directory found in `run_dir`. Each summary's format is as follows (for `expand_metrics=False`): ``` { "model": "google/gemma-2b", "commit": "3cd6ed22e4d49219f300f5055e71e3929aba20d7", "config": "benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5", "metrics": { "decode.latency.mean": 1.624666809082031, "per_token.latency.mean": 0.012843788806628804, "per_token.throughput.value": 77.85864553330948 } } ``` """ reports = glob.glob(os.path.join(run_dir, "**/benchmark_report.json"), recursive=True) report_dirs = [str(Path(report).parent) for report in reports] summaries = [] for report_dir in report_dirs: commit = re.search(r"/commit=([^/]+)", report_dir).groups()[0] if not os.path.isfile(os.path.join(report_dir, "benchmark.json")): continue benchmark = Benchmark.from_json(os.path.join(report_dir, "benchmark.json")) report = benchmark.report model = benchmark.config.backend["model"] # Ths looks like `benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5`. # (we rely on the usage of hydra's `${hydra.job.override_dirname}`.) 
benchmark_name = re.sub(f"backend.model={model},*", "", report_dir) benchmark_name = str(Path(benchmark_name).parts[-1]) if benchmark_name.startswith("commit="): benchmark_name = benchmark.config.name metrics_values = {} # post-processing of report: show a few selected/important metric for metric in metrics: keys = metric.split(".") value = report.to_dict() current = metrics_values for key in keys: # Avoid KeyError when a user's specified metric has typo. # TODO: Give warnings. if key not in value: continue value = value[key] if expand_metrics: if isinstance(value, dict): if key not in current: current[key] = {} current = current[key] else: current[key] = value if not expand_metrics: metrics_values[metric] = value # show some config information print(f"model: {model}") print(f"commit: {commit}") print(f"config: {benchmark_name}") if len(metrics_values) > 0: print("metrics:") if expand_metrics: print(metrics_values) else: for metric, value in metrics_values.items(): print(f" - {metric}: {value}") print("-" * 80) summary = { "model": model, "commit": commit, "config": benchmark_name, "metrics": metrics_values, } summaries.append(summary) with open(os.path.join(report_dir, "summary.json"), "w") as fp: json.dump(summary, fp, indent=4) return summaries def combine_summaries(summaries): """Combine a list of summary obtained from the function `summarize`. The combined summary's format is as follows: ``` "google/gemma-2b": { "benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5": { "3cd6ed22e4d49219f300f5055e71e3929aba20d7": { "metrics": {"decode.latency.mean": 1.624666809082031} }, "c97ee28b117c0abe8e08891f402065e4df6d72aa": { "metrics": {"decode.latency.mean": 1.6278163452148438} } }, "benchmark.input_shapes.batch_size=2,benchmark.input_shapes.sequence_length=5": { "3cd6ed22e4d49219f300f5055e71e3929aba20d7": { "metrics": {"decode.latency.mean": 1.6947791748046876} }, "c97ee28b117c0abe8e08891f402065e4df6d72aa": { "metrics": { "decode.latency.mean": 1.6980519409179688} } } } ``` """ combined = {} for summary in summaries: model = summary["model"] config = summary["config"] commit = summary["commit"] if model not in combined: combined[model] = {} if config not in combined[model]: combined[model][config] = {} if commit not in combined[model][config]: combined[model][config][commit] = {"metrics": summary["metrics"]} with open(os.path.join(exp_run_dir, "summary.json"), "w") as fp: json.dump(combined, fp, indent=4) print(json.dumps(combined, indent=4)) return combined if __name__ == "__main__": def list_str(values): return values.split(",") parser = argparse.ArgumentParser() parser.add_argument("--config-dir", type=str, required=True, help="The path to the config directory.") parser.add_argument("--config-name", type=str, required=True, help="The config name.") # arguments specific to this wrapper for our own customization parser.add_argument("--ensure_empty", type=bool, default=True, help="If to create a temporary directory.") parser.add_argument( "--commit", type=list_str, default="", help="Comma-separated list of branch names and/or commit sha values on which the benchmark will run. 
If `diff` is specified, it will run on both the current head and the `main` branch.", ) parser.add_argument("--metrics", type=str, help="The metrics to be included in the summary.") parser.add_argument("--repo_id", type=str, default=None, help="The repository to which the file will be uploaded.") parser.add_argument("--path_in_repo", type=str, default=None, help="Relative filepath in the repo.") parser.add_argument("--token", type=str, default=None, help="A valid user access token (string).") args, optimum_benchmark_args = parser.parse_known_args() repo = Repo(PATH_TO_REPO) metrics = [ "prefill.latency.mean", "prefill.throughput.value", "decode.latency.mean", "decode.throughput.value", "per_token.latency.mean", "per_token.throughput.value", ] if args.metrics is not None: metrics = args.metrics.split(",") # Get `backend.model` in a hacky way: We want to control the experiment flow manually. models = [""] for idx, arg in enumerate(optimum_benchmark_args): if arg.startswith("backend.model="): models = arg[len("backend.model=") :] models = models.split(",") break optimum_benchmark_args = [arg for arg in optimum_benchmark_args if not arg.startswith("backend.model=")] # Get the commit(s) current_head = str(repo.head.commit) if repo.head.is_detached else str(repo.head.ref) commits = [x for x in args.commit if x != ""] if len(commits) == 0: commits = [current_head] elif len(commits) == 1 and commits[0] == "diff": # compare to `main` commits = ["main", current_head] # Get the specified run directory run_dir_arg_idx, run_dir = -1, None sweep_dir_arg_idx, sweep_dir = -1, None for idx, arg in enumerate(optimum_benchmark_args): if arg.startswith("hydra.run.dir="): run_dir = arg[len("hydra.run.dir=") :] run_dir_arg_idx = idx elif arg.startswith("hydra.sweep.dir="): sweep_dir = arg[len("hydra.sweep.dir=") :] sweep_dir_arg_idx = idx exp_run_dir, arg_dix, arg_name = ( (sweep_dir, sweep_dir_arg_idx, "hydra.sweep.dir") if "--multirun" in optimum_benchmark_args else (run_dir, run_dir_arg_idx, "hydra.run.dir") ) # TODO: not hardcoded if exp_run_dir is None and args.ensure_empty: exp_run_dir = "_benchmark" if args.ensure_empty: os.makedirs(exp_run_dir, exist_ok=True) exp_run_dir = tempfile.mkdtemp(dir=exp_run_dir) run_summaries = [] for commit in commits: with checkout_commit(repo, commit): commit = str(repo.head.commit) commit_run_dir = exp_run_dir if exp_run_dir is not None: commit_run_dir = os.path.join(exp_run_dir, rf"commit\={commit}") print(f"Run benchmark on commit: {commit}") for model in models: model_arg = [f"backend.model={model}"] if model != "" else [] dir_args = [] if commit_run_dir is not None: if arg_dix > -1: optimum_benchmark_args[arg_dix] = f"{arg_name}={commit_run_dir}" else: dir_args = [ f"hydra.sweep.dir={commit_run_dir}", f"hydra.run.dir={commit_run_dir}/" + "${hydra.job.override_dirname}", ] main(args.config_dir, args.config_name, model_arg + dir_args + optimum_benchmark_args) if commit_run_dir is not None: # Need to remove the `\` character summaries = summarize(commit_run_dir.replace("\\", ""), metrics) run_summaries.extend(summaries) # aggregate the information across the commits if exp_run_dir is not None: with open(os.path.join(exp_run_dir, "summaries.json"), "w") as fp: json.dump(run_summaries, fp, indent=4) combined_summary = combine_summaries(run_summaries) if args.repo_id is not None and args.path_in_repo is not None: # Upload to Hub api = HfApi() api.upload_folder( folder_path=exp_run_dir, path_in_repo=args.path_in_repo, repo_id=args.repo_id, repo_type="dataset", 
token=args.token, )
transformers/benchmark/benchmark.py/0
{ "file_path": "transformers/benchmark/benchmark.py", "repo_id": "transformers", "token_count": 5440 }
FROM python:3.9-slim ENV PYTHONDONTWRITEBYTECODE=1 ARG REF=main USER root RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git libgl1-mesa-glx libgl1 g++ tesseract-ocr ENV UV_PYTHON=/usr/local/bin/python RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu RUN uv pip install --no-cache-dir --no-deps timm accelerate RUN pip install -U --upgrade-strategy eager --no-cache-dir pytesseract python-Levenshtein opencv-python nltk # RUN uv pip install --no-cache-dir natten==0.15.1+torch210cpu -f https://shi-labs.com/natten/wheels RUN pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[testing, vision]" 'scikit-learn' 'torch-stft' 'nose' 'dataset' # RUN git clone https://github.com/facebookresearch/detectron2.git # RUN python3 -m pip install --no-cache-dir -e detectron2 RUN pip install 'git+https://github.com/facebookresearch/detectron2.git@92ae9f0b92aba5867824b4f12aa06a22a60a45d3' RUN pip uninstall -y transformers RUN apt-get clean && rm -rf /var/lib/apt/lists/*
transformers/docker/exotic-models.dockerfile/0
{ "file_path": "transformers/docker/exotic-models.dockerfile", "repo_id": "transformers", "token_count": 468 }
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-23-11.html#rel-23-11 FROM nvcr.io/nvidia/pytorch:23.11-py3 LABEL maintainer="Hugging Face" ARG DEBIAN_FRONTEND=noninteractive # Example: `cu102`, `cu113`, etc. ARG CUDA='cu121' RUN apt -y update RUN apt install -y libaio-dev RUN python3 -m pip install --no-cache-dir --upgrade pip ARG REF=main RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF RUN python3 -m pip uninstall -y torch torchvision torchaudio # Install **nightly** release PyTorch (flag `--pre`) # (PyTorch must be installed before pre-compiling any DeepSpeed c++/cuda ops.) # (https://www.deepspeed.ai/tutorials/advanced-install/#pre-install-deepspeed-ops) RUN python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA RUN python3 -m pip install --no-cache-dir ./transformers[deepspeed-testing] RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate # Uninstall `transformer-engine` shipped with the base image RUN python3 -m pip uninstall -y transformer-engine # Uninstall `torch-tensorrt` and `apex` shipped with the base image RUN python3 -m pip uninstall -y torch-tensorrt apex # Pre-build **nightly** release of DeepSpeed, so it would be ready for testing (otherwise, the 1st deepspeed test will timeout) RUN python3 -m pip uninstall -y deepspeed # This has to be run inside the GPU VMs running the tests. (So far, it fails here due to GPU checks during compilation.) # Issue: https://github.com/deepspeedai/DeepSpeed/issues/2010 # RUN git clone https://github.com/deepspeedai/DeepSpeed && cd DeepSpeed && rm -rf build && \ # DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 ## For `torchdynamo` tests ## (see https://github.com/huggingface/transformers/pull/17765) #RUN git clone https://github.com/pytorch/functorch #RUN python3 -m pip install --no-cache-dir ./functorch[aot] #RUN cd functorch && python3 setup.py develop # #RUN git clone https://github.com/pytorch/torchdynamo #RUN python3 -m pip install -r ./torchdynamo/requirements.txt #RUN cd torchdynamo && python3 setup.py develop # ## install TensorRT #RUN python3 -m pip install --no-cache-dir -U nvidia-pyindex #RUN python3 -m pip install --no-cache-dir -U nvidia-tensorrt==8.2.4.2 # ## install torch_tensorrt (fx path) #RUN git clone https://github.com/pytorch/TensorRT.git #RUN cd TensorRT/py && python3 setup.py install --fx-only # When installing in editable mode, `transformers` is not recognized as a package. # this line must be added in order for python to be aware of transformers. RUN cd transformers && python3 setup.py develop # Disable for now as deepspeed is not installed above. To be enabled once the issue is fixed. # RUN python3 -c "from deepspeed.launcher.runner import main"
transformers/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile/0
{ "file_path": "transformers/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile", "repo_id": "transformers", "token_count": 1032 }
# تحميل نماذج مدرؚة مسؚقًا ؚاستخدام AutoClass لم ترغؚ في إن؎اء محول معماري لم؀؎ر التراؚط الخاص ؚك، فهناك العديد من محولات المعمارية المختلفة التي يمكنك الاختيار من ؚينها. كجزء من الفلسفة الأساسية لـ 🀗 Transformers لجعل المكتؚة سهلة وؚسيطة ومرنة، فإن ف؊ة `AutoClass` تستدل تلقا؊يًا وتحمّل الؚنية الصحيحة من نسخة نموذج (Model Checkpoint) معينة. تسمح لك طريقة `from_pretrained()` ؚتحميل نموذج مُدرؚ مسؚقًا لأي ؚنية ؚسرعة حتى لا تضطر إلى تكريس الوقت والموارد لتدريؚ نموذج من الصفر. إن إنتاج هذا النوع من التعليمات الؚرمجية غير المعتمدة على نسخ يعني أنه إذا نجح رمزك مع ننسخة واحدة، فسيتم ت؎غيله مع أخرى - طالما تم تدريؚه لمهمة مماثلة - حتى إذا كانت الؚنية المعمارية مختلفة. تذكر أن الؚنية ت؎ير إلى هيكل النموذج، والنسخ هي الأوزان لؚنية معمارية معينة. على سؚيل المثال، [BERT](https://huggingface.co/google-bert/bert-base-uncased) هي ؚنية معمارية، في حين أن `google-bert/bert-base-uncased` هي نسخة. "النموذج" هو مصطلح عام يمكن أن يعني إما الؚنية أو نالنسخة. في هذا الؚرنامج التعليمي، ستتعلم كيفية: * تحميل مُجزّ؊ الرموز مُدرؚ مسؚقًا * تحميل معالج صور مُدرؚ مسؚقًا * تحميل مستخرج ميزات مُدرؚ مسؚقًا * تحميل معالج مُدرؚ مسؚقًا * تحميل نموذج مُدرؚ مسؚقًا * تحميل نموذج كعمود فقري ## AutoTokenizer تؚدأ كل مهمة NLP تقريًؚا ؚمُجزّ؊ للرموز. يقوم المُجزّ؊ ؚتحويل النص إلى ؎كل يمكن للنموذج معالجته. قم ؚتحميل المُجزّ؊ ؚاستخدام [`AutoTokenizer.from_pretrained`]: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") ``` ثم قم ؚتحليل إدخالك على النحو الموضح أدناه: ```py >>> sequence = "In a hole in the ground there lived a hobbit." >>> print(tokenizer(sequence)) {'input_ids': [101, 1999, 1037, 4920, 1999, 1996, 2598, 2045, 2973, 1037, 7570, 10322, 4183, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` ## معالج الصور التلقا؊ي (AutoImageProcessor) ؚالنسؚة لمهمات الر؀ية، يقوم معالج الصور ؚمعالجة الصورة إلى تنسيق الإدخال الصحيح. ```py >>> from transformers import AutoImageProcessor >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") ``` ## AutoBackbone <div style="text-align: center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Swin%20Stages.png"> <figcaption class="mt-2 text-center text-sm text-gray-500">الصورة توضح مخطط مراحل نموذج Swin.</figcaption> </div> يسمح لك [`AutoBackbone`] ؚاستخدام النماذج المُدرؚة مسؚقًا كعمود فقري للحصول على خرا؊ط ميزات من مراحل مختلفة من العمود الفقري. يجؚ عليك تحديد أحد المعلمات التالية في [`~PretrainedConfig.from_pretrained`]: * `out_indices` هو فهرس الطؚقة التي تريد الحصول على خريطة الميزات منها * `out_features` هو اسم الطؚقة التي تريد الحصول على خريطة الميزات منها يمكن استخدام هذه المعلمات ؚ؎كل متؚادل، ولكن إذا كنت تستخدم كلاً منها، فتأكد من أنها متوا؊مة مع ؚعضها الؚعض! إذا لم تمرر أيًا من هذه المعلمات، فسيقوم العمود الفقري ؚإرجاع خريطة الميزات من الطؚقة الأخيرة. 
<div style="text-align: center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Swin%20Stage%201.png"> <figcaption class="mt-2 text-center text-sm text-gray-500">صورة توضح خريطة ميزات من المرحلة الأولى للعمود الفقري.</figcaption> </div> على سؚيل المثال، في الرسم التخطيطي أعلاه، لإرجاع خريطة الميزات من المرحلة الأولى من العمود الفقري Swin، يمكنك تعيين `out_indices=(1,)`: ```py >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224") >>> model = AutoBackbone.from_pretrained("microsoft/swin-tiny-patch4-window7-224", out_indices=(1,)) >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) >>> feature_maps = outputs.feature_maps ``` الآن يمكنك الوصول إلى كا؊ن `feature_maps` من المرحلة الأولى من العمود الفقري: ```py >>> list(feature_maps[0].shape) [1, 96, 56, 56] ``` ## مستخرج الميزات التلقا؊ي (AutoFeatureExtractor) ؚالنسؚة للمهام الصوتية، يقوم مستخرج الميزات ؚمعالجة إ؎ارة الصوت إلى تنسيق الإدخال الصحيح. قم ؚتحميل مستخرج ميزات ؚاستخدام [`AutoFeatureExtractor.from_pretrained`]: ```py >>> from transformers import AutoFeatureExtractor >>> feature_extractor = AutoFeatureExtractor.from_pretrained( ... "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition" ... ) ``` ## المعالج التلقا؊ي (AutoProcessor) تتطلؚ المهام متعددة الوسا؊ط معالجًا يجمع ؚين نوعين من أدوات المعالجة المسؚقة. على سؚيل المثال، يتطلؚ نموذج [LayoutLMV2](model_doc/layoutlmv2) معالج صور لمعالجة الصور ومُجزّ؊ لمعالجة النص؛ يجمع المعالج كليهما. قم ؚتحميل معالج ؚاستخدام [`AutoProcessor.from_pretrained`]: ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased") ``` ## النموذج التلقا؊ي (AutoModel) <frameworkcontent> <pt> تسمح لك ف؊ات `AutoModelFor` ؚتحميل نموذج مُدرؚ مسؚقًا لمهمة معينة (راجع [هنا](model_doc/auto) للحصول على قا؊مة كاملة ؚالمهام المتاحة). على سؚيل المثال، قم ؚتحميل نموذج لتصنيف التسلسل ؚاستخدام [`AutoModelForSequenceClassification.from_pretrained`]: ```py >>> from transformers import AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` أعد استخدام نفس نقطة التفتي؎ لتحميل ؚنية لمهمة مختلفة: ```py >>> from transformers import AutoModelForTokenClassification >>> model = AutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` <Tip warning={true}> ؚالنسؚة لنماذج PyTorch، تستخدم طريقة `from_pretrained()` `torch.load()` التي تستخدم داخليًا `pickle` والتي يُعرف أنها غير آمنة. ؚ؎كل عام، لا تقم مطلقًا ؚتحميل نموذج قد يكون مصدره مصدرًا غير موثوق ؚه، أو قد يكون تم العؚث ØšÙ‡. يتم تخفيف هذا الخطر الأمني جز؊يًا للنماذج العامة المستضافة على Hub Hugging Face، والتي يتم [فحصها ؚحثًا عن الؚرامج الضارة](https://huggingface.co/docs/hub/security-malware) في كل ارتكاؚ. راجع [توثيق Hub](https://huggingface.co/docs/hub/security) للحصول على أفضل الممارسات مثل [التحقق من التوقيع](https://huggingface.co/docs/hub/security-gpg#signing-commits-with-gpg) ؚاستخدام GPG. لا تتأثر نقاط تفتي؎ TensorFlow و Flax، ويمكن تحميلها داخل ؚنيات PyTorch ؚاستخدام `from_tf` و `from_flax` kwargs لطريقة `from_pretrained` للتحايل على هذه الم؎كلة. 
</Tip> ؚ؎كل عام، نوصي ؚاستخدام ف؊ة `AutoTokenizer` وف؊ة `AutoModelFor` لتحميل مثيلات مُدرؚة مسؚقًا من النماذج. سيساعدك هذا في تحميل الؚنية الصحيحة في كل مرة. في الؚرنامج التعليمي التالي، تعرف على كيفية استخدام المحلل اللغوي ومعالج الصور ومستخرج الميزات والمعالج الذي تم تحميله حديثًا لمعالجة مجموعة ؚيانات للضؚط الدقيق. </pt> <tf> أخيرًا، تسمح لك ف؊ات `TFAutoModelFor` ؚتحميل نموذج مُدرؚ مسؚقًا لمهمة معينة (راجع [هنا](model_doc/auto) للحصول على قا؊مة كاملة ؚالمهام المتاحة). على سؚيل المثال، قم ؚتحميل نموذج لتصنيف التسلسل ؚاستخدام [`TFAutoModelForSequenceClassification.from_pretrained`]: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` أعد استخدام نفس نقطة التفتي؎ لتحميل ؚنية لمهمة مختلفة: ```py >>> from transformers import TFAutoModelForTokenClassification >>> model = TFAutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` ؚ؎كل عام، نوصي ؚاستخدام ف؊ة `AutoTokenizer` وف؊ة `TFAutoModelFor` لتحميل نسخ لنماذج مُدرؚة مسؚقًا. سيساعدك هذا في تحميل الؚنية الصحيحة في كل مرة. في الؚرنامج التعليمي التالي، ستتعرف على كيفية استخدام المُجزّ؊ اللغوي ومعالج الصور ومستخرج الميزات والمعالج الذي تم تحميله حديثًا لمعالجة مجموعة ؚيانات للضؚط الدقيق. </tf> </frameworkcontent>
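فيما يلي مثال توضيحي إضافي (ليس جزءًا من الدرس الأصلي) ÙŠÙØšÙŠÙ‘ÙÙ† كيفية استخدام معالج الصور الذي تم تحميله في قسم `AutoImageProcessor` أعلاه لمعالجة صورة وتمرير المُخرجات إلى نموذج تصنيف صور؛ اختيار الف؊ة `AutoModelForImageClassification` هنا هو لغرض التوضيح فقط:

```py
>>> from transformers import AutoImageProcessor, AutoModelForImageClassification
>>> import torch
>>> from PIL import Image
>>> import requests

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
>>> model = AutoModelForImageClassification.from_pretrained("google/vit-base-patch16-224")

>>> inputs = image_processor(image, return_tensors="pt")
>>> with torch.no_grad():
...     logits = model(**inputs).logits
>>> predicted_label = model.config.id2label[logits.argmax(-1).item()]
```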
transformers/docs/source/ar/autoclass_tutorial.md/0
{ "file_path": "transformers/docs/source/ar/autoclass_tutorial.md", "repo_id": "transformers", "token_count": 5440 }
# ؎ارك نموذجك مع العالم أ؞هرت آخر درسين تعليميين كيفية ضؚط نموذج ؚدقة ؚاستخدام PyTorch و Keras و 🀗 Accelerate لعمليات التهي؊ة الموزعة. والخطوة التالية هي م؎اركة نموذجك مع المجتمع! في Hugging Face، ن؀من ؚالم؎اركة المفتوحة للمعرفة والموارد لتمكين الجميع من الاستفادة من الذكاء الاصطناعي. ون؎جعك على م؎اركة نموذجك مع المجتمع لمساعدة الآخرين على توفير الوقت والموارد. في هذا الدرس، ستتعلم طريقتين لم؎اركة نموذجك المدرؚ أو مضؚوط على منصة [Model Hub](https://huggingface.co/models): - رفع ملفاتك إلى منصة Hub مؚا؎رة ؚاستخدام الكود الؚرمجي. - قم ؚسحؚ وإفلات ملفاتك إلى Hub ؚاستخدام الواجهة web. <iframe width="560" height="315" src="https://www.youtube.com/embed/XvSGPZFEjDY" title="م؎غل فيديو YouTube" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <Tip> لم؎اركة نموذج مع المجتمع، تحتاج إلى حساؚ على [huggingface.co](https://huggingface.co/join). يمكنك أيضًا الانضمام إلى من؞مة موجودة أو إن؎اء من؞مة جديدة. </Tip> ## ميزات المستودع يعمل كل مستودع على Model Hub مثل مستودع GitHub النتقليدي. تقدم مستودعاتنا التحكم في الإصدارات وسجل التغييرات، وقدرة على ر؀ية الاختلافات ؚين الإصدارات. تعتمد آلية التحكم في الإصدارات على منصة Model Hub على ن؞امي git و [git-lfs](https://git-lfs.github.com/). وؚعؚارة أخرى، يمكنك التعامل مع كل نموذج كأنه مستودع مستقل، مما يمكّن من زيادة التحكم في الوصول والقاؚلية للتطوير. يسمح التحكم في الإصدار ؚإجراء تعديلات وتثؚيت إصدار محدد من النموذج ؚاستخدام رمز التغيير (commit hash) أو وسم (tag) أو فرع (branch). ؚفضل هذه الميزة، يمكنك تحميل إصدار محدد من النموذج ؚاستخدام معلمة الإصدار "revision": ```py >>> model = AutoModel.from_pretrained( ... "julien-c/EsperBERTo-small", revision="4c77982" # اسم العلامة، أو اسم الفرع، أو تجز؊ة الالتزام ... ) ``` من السهل أيضًا تعديل الملفات الموجودة داخل مستودع، ويمكنك عرض سجل التغييرات التي طرأت على هذه الملفات ومعاينة الاختلافات ؚين الإصدارات المختلفة: ![vis_diff](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vis_diff.png) ## الإعداد Ù‚ØšÙ„ م؎اركة نموذج على Hub، ستحتاج إلى ؚيانات اعتماد حساؚ Hugging Face الخاصة ØšÙƒ. إذا كنت تستخدم منصة الأوامر، فقم ؚت؎غيل الأمر التالي في ؚي؊ة افتراضية حيث تم تثؚيت 🀗 Transformers. سيقوم هذا الأمر ؚتخزين رمز الدخول الخاص ØšÙƒ في مجلد تخزين الم؀قت لـ Hugging Face (`~/.cache/` ؚ؎كل افتراضي): ```bash huggingface-cli login ``` إذا كنت تستخدم دفتر ملاح؞ات مثل Jupyter أو Colaboratory، فتأكد من تثؚيت مكتؚة [`huggingface_hub`](https://huggingface.co/docs/hub/adding-a-library). تسمح لك هذه المكتؚة ؚالتفاعل ؚرمجيًا مع Hub. ```bash pip install huggingface_hub ``` ثم استخدم `notebook_login` لتسجيل الدخول إلى Hub، واتؚع الراؚط [هنا](https://huggingface.co/settings/token) لإن؎اء رمز للتسجيل: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## تحويل النموذج ليتوافق مع جميع الأطر العمل لضمان إمكانية استخدام نموذجك من Ù‚ØšÙ„ ؎خص يعمل ؚإطار عمل مختلف، نوصي ؚتحويل نموذجك ورفعه مع نقاط التحقق من PyTorch و TensorFlow. في حين أن المستخدمين لا يزال ؚإمكانهم تحميل نموذجك من إطار عمل مختلف إذا تخطيت هذه الخطوة، إلا أنه سيكون أؚطأ لأن 🀗 Transformers ستحتاج إلى تحويل نقطة التحقق أثناء الت؎غيل. تحويل نقطة التحقق لإطار عمل آخر أمر سهل. تأكد من تثؚيت PyTorch و TensorFlow (راجع [هنا](installation) لتعليمات التثؚيت)، ثم اؚحث عن النموذج الملا؊م لمهمتك في الإطار الآخر. 
<frameworkcontent> <pt> حدد `from_tf=True` لتحويل نقطة تحقق من TensorFlow إلى PyTorch: ```py >>> pt_model = DistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_tf=True) >>> pt_model.save_pretrained("path/to/awesome-name-you-picked") ``` </pt> <tf> حدد `from_pt=True` لتحويل نقطة تحقق من PyTorch إلى TensorFlow: ```py >>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_pt=True) ``` ؚعد ذلك، يمكنك حف؞ نموذج TensorFlow الجديد ؚنقطة التحقق الجديدة: ```py >>> tf_model.save_pretrained("path/to/awesome-name-you-picked") ``` </tf> <jax> إذا كان النموذج متاحًا في Flax، فيمكنك أيضًا تحويل نقطة تحقق من PyTorch إلى Flax: ```py >>> flax_model = FlaxDistilBertForSequenceClassification.from_pretrained( ... "path/to/awesome-name-you-picked", from_pt=True ... ) ``` </jax> </frameworkcontent> ## دفع نموذج أثناء التدريؚ <frameworkcontent> <pt> <Youtube id="Z1-XMy-GNLQ"/> م؎اركة نموذجك على Hub مر ؚسيط للغاية كل ما عليك هو إضافة معلمة أو استدعاء رد إضافي. كما تذكر من درس [التدريؚ الدقيق](training)، فإن ف؊ة [`TrainingArguments`] هي المكان الذي تحدد فيه المعلمات الفا؊قة وخيارات التدريؚ الإضافية. ت؎مل إحدى خيارات التدريؚ هذه القدرة على دفع النموذج مؚا؎رة إلى المنصة Hub. قم ؚتعيين `push_to_hub=True` في [`TrainingArguments`]: ```py >>> training_args = TrainingArguments(output_dir="my-awesome-model", push_to_hub=True) ``` مرر معامﻻت التدريؚ كالمعتاد إلى [`Trainer`]: ```py >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=small_train_dataset, ... eval_dataset=small_eval_dataset, ... compute_metrics=compute_metrics, ... ) ``` ؚعد ضؚط نموذجك ؚدقة، يمكنك استخدام دالة [`~transformers.Trainer.push_to_hub`] المتاحة في [`Trainer`] لدفع النموذج المدرؚ إلى المنصة Hub. سوف تضيف 🀗 Transformers تلقا؊يًا المعلمات الفا؊قة المستخدمة في التدريؚ ونتا؊ج التدريؚ وإصدارات الإطار إلى ؚطاقة معلومات النموذج الخاصة ØšÙƒ! ```py >>> trainer.push_to_hub() ``` </pt> <tf> ؎ارك نموذجًا على Hub ؚاستخدام [`PushToHubCallback`]. في دالة [`PushToHubCallback`], أضف: - دليل إخراج لنموذجك. - مُجزّ؊ اللغوي. - `hub_model_id`، والذي هو اسم مستخدم Hub واسم النموذج الخاص ØšÙƒ. ```py >>> from transformers import PushToHubCallback >>> push_to_hub_callback = PushToHubCallback( ... output_dir="./your_model_save_path", tokenizer=tokenizer, hub_model_id="your-username/my-awesome-model" ... ) ``` أضف الاستدعاء إلى [`fit`](https://keras.io/api/models/model_training_apis/)، وسيقوم 🀗 Transformers ؚدفع النموذج المدرؚ إلى Hub: ```py >>> model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=3, callbacks=push_to_hub_callback) ``` </tf> </frameworkcontent> ## استخدام دالة `push_to_hub` يمكنك أيضًا استدعاء `push_to_hub` مؚا؎رة على نموذجك لتحميله إلى Hub. حدد اسم نموذجك في `push_to_hub`: ```py >>> pt_model.push_to_hub("my-awesome-model") ``` ين؎؊ هذا مستودعًا تحت اسم المستخدم الخاص ØšÙƒ ؚاسم نموذج `my-awesome-model`. يمكن للمستخدمين الآن تحميل نموذجك ؚاستخدام دالة `from_pretrained`: ```py >>> from transformers import AutoModel >>> model = AutoModel.from_pretrained("your_username/my-awesome-model") ``` ```py >>> from transformers import AutoModel >>> model = AutoModel.from_pretrained("your_username/my-awesome-model") ``` إذا كنت تنتمي إلى من؞مة وتريد دفع نموذجك تحت اسم المن؞مة ؚدلاً من ذلك، فما عليك سوى إضافته إلى `repo_id`: ```py >>> pt_model.push_to_hub("my-awesome-org/my-awesome-model") ``` يمكن أيضًا استخدام دالة `push_to_hub` لإضافة ملفات أخرى إلى مستودع النماذج. 
على سؚيل المثال، أضف رموزًا إلى مستودع نموذج: ```py >>> tokenizer.push_to_hub("my-awesome-model") ``` أو رؚما تريد إضافة إصدار TensorFlow من نموذج PyTorch المضؚوط: ```py >>> tf_model.push_to_hub("my-awesome-model") ``` الآن عند الانتقال إلى ملفك ال؎خصي على Hugging Face، يجؚ أن ترى مستودع النماذج الذي أن؎أته حديثًا. سي؀دي النقر فوق علامة التؚويؚ **Files** إلى عرض جميع الملفات التي قمت ؚتحميلها في المستودع. للحصول على مزيد من التفاصيل حول كيفية إن؎اء الملفات وتحميلها إلى مستودع، راجع وثا؊ق Hub [هنا](https://huggingface.co/docs/hub/how-to-upstream). ## التحميل ؚاستخدام الواجهة web يمكن للمستخدمين الذين يفضلون نهج عدم الترميز تحميل نموذج من خلال واجهة Hub web. قم ؚزيارة [huggingface.co/new](https://huggingface.co/new) لإن؎اء مستودع جديد: ![new_model_repo](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/new_model_repo.png) من هنا، أضف ؚعض المعلومات حول نموذجك: - حدد **مالك** المستودع. يمكن أن يكون هذا أنت أو أي من المن؞مات التي تنتمي إليها. - اختر اسمًا لنموذجك، والذي سيكون أيضًا اسم المستودع. - اختر ما إذا كان نموذجك عامًا أم خاصًا. - حدد ترخيص الاستخدام لنموذجك. الآن انقر فوق علامة التؚويؚ **Files** ثم انقر فوق الزر **Add file** لإضافة ملف جديد إلى مستودعك. ثم اسحؚ وأسقط ملفًا لتحميله وأضف رسالة الالتزام. ![upload_file](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/upload_file.png) ## إضافة ؚطاقة نموذج للتأكد من فهم المستخدمين لقدرات نموذجك وقيوده وتحيزاته المحتملة واعتؚاراته الأخلاقية، يرجى إضافة ؚطاقة نموذج إلى مستودعك. يتم تعريف ؚطاقة النموذج في ملف `README.md`. يمكنك إضافة ؚطاقة نموذج عن طريق: * قم ؚإن؎اء ملف `README.md` وتحميله يدويًا. * انقر فوق الزر **Edit model card** في مستودع نموذجك. الق ن؞رة على ؚطاقة [DistilBert](https://huggingface.co/distilbert/distilbert-base-uncased) للحصول على مثال جيد على نوع المعلومات التي يجؚ أن تتضمنها ؚطاقة النموذج. للحصول على مزيد من التفاصيل حول الخيارات الأخرى التي يمكنك التحكم فيها في ملف `README.md` مثل الؚصمة الكرؚونية للنموذج أو أمثلة الأداة، راجع الوثا؊ق [هنا](https://huggingface.co/docs/hub/models-cards).
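وكمثال توضيحي إضافي (ليس جزءًا من الدليل الأصلي)، يمكن أيضًا رفع الملفات ؚرمجيًا ؚاستخدام مكتؚة `huggingface_hub` ؚدلاً من الواجهة web؛ أسماء المستودع والملف أدناه افتراضية لغرض التوضيح فقط:

```py
>>> from huggingface_hub import HfApi

>>> api = HfApi()
>>> api.create_repo(repo_id="your_username/my-awesome-model", exist_ok=True)
>>> api.upload_file(
...     path_or_fileobj="README.md",
...     path_in_repo="README.md",
...     repo_id="your_username/my-awesome-model",
... )
```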
transformers/docs/source/ar/model_sharing.md/0
{ "file_path": "transformers/docs/source/ar/model_sharing.md", "repo_id": "transformers", "token_count": 6706 }
# ما الذي تستطيع مكتؚة 🀗 Transformers القيام ؚه؟ مكتؚة 🀗 Transformers هي مجموعة من النماذج المُدرؚّة مسؚقًا الأفضل في ف؊تها لمهام معالجة اللغة الطؚيعية (NLP)، ور؀ية الحاسوؚ، ومعالجة الصوت والكلام. لا تحتوي المكتؚة فقط على نماذج المحولات (Transformer) فحسؚ، ØšÙ„ ت؎مل أيضًا نماذج أخرى لا تعتمد على المحولات مثل ال؎ؚكات العصؚية التلافيفية الحديثة لمهام ر؀ية الحاسوؚ. إذا ن؞رت إلى ؚعض المنتجات الاستهلاكية الأكثر ؎يوعًا اليوم، مثل الهواتف الذكية والتطؚيقات وأجهزة التلفاز، فمن المحتمل أن تقف وراءها تقنية ما من تقنيات التعلم العميق. هل تريد إزالة جسم من خلفية صورة التقطتها ؚهاتفك الذكي؟ هذا مثال على مهمة التجز؊ة الؚانورامية (Panoptic Segmentation) ( لا تقلق إذا لم تفهم معناها ؚعد، فسوف ن؎رحها في الأقسام التالية!). توفر هذه الصفحة ن؞رة عامة على مختلف مهام الكلام والصوت ور؀ية الحاسوؚ ومعالجة اللغات الطؚيعية المختلفة التي يمكن حلها ؚاستخدام مكتؚة 🀗 Transformers في ثلاثة أسطر فقط من التعليمات الؚرمجية! ## الصوت تختلف مهام معالجة الصوت والكلام قليلاً عن ؚاقي الوسا؊ط، ويرجع ذلك ؚؚ؎كل أساسي لأن الصوت كمدخل هو إ؎ارة متصلة. على عكس النص، لا يمكن تقسيم الموجة الصوتية الخام ؚ؎كل مرتؚ في أجزاء منفصلة ؚالطريقة التي يمكن ؚها تقسيم الجملة إلى كلمات. وللتغلؚ على هذا، يتم عادةً أخذ عينات من الإ؎ارة الصوتية الخام على فترات زمنية منت؞مة. كلما زاد عدد العينات التي ت؀خذ في فترة زمنية معينة، ارتفع معدل أخذ العينات (معدل التردد)، وصار الصوت أقرؚ إلى مصدر الصوت الأصلي. قامت الطرق الساؚقة ؚمعالجة الصوت لاستخراج الميزات المفيدة منه. أصؚح من ال؎ا؊ع الآن الؚدء ؚمهام معالجة الصوت والكلام عن طريق تغذية ؎كل الموجة الصوتية الخام مؚا؎رة في م؎فر الميزات (Feature Encoder) لاستخراج تمثيل صوتي له. وهذا يؚسط خطوة المعالجة المسؚقة ويسمح للنموذج ؚتعلم أهم الميزات. ### تصنيف الصوت تصنيف الصوت (Audio Classification) هو مهمة يتم فيها تصنيف ؚيانات الصوت الصوت من مجموعة محددة مسؚقًا من الف؊ات. إنه ف؊ة واسعة تضم العديد من التطؚيقات المحددة، والتي ت؎مل: * تصنيف الم؎هد الصوتي: وضع علامة على الصوت ؚاستخدام تسمية الم؎هد ("المكتؚ"، "ال؎اط؊"، "الملعؚ") * اكت؎اف الأحداث الصوتية: وضع علامة على الصوت ؚاستخدام تسمية حدث صوتي ("ؚوق السيارة"، "صوت الحوت"، "كسر زجاج") * الوسم: وصنيف صوت يحتوي على أصوات متعددة (أصوات الطيور، وتحديد هوية المتحدث في اجتماع) * تصنيف الموسيقى: وضع علامة على الموسيقى ؚتسمية النوع ("ميتال"، "هيؚ هوؚ"، "كانتري") ```py >>> from transformers import pipeline >>> classifier = pipeline(task="audio-classification", model="superb/hubert-base-superb-er") >>> preds = classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.4532, 'label': 'hap'}, {'score': 0.3622, 'label': 'sad'}, {'score': 0.0943, 'label': 'neu'}, {'score': 0.0903, 'label': 'ang'}] ``` ### التعرف التلقا؊ي على الكلام يقوم التعرف التلقا؊ي على الكلام (ASR) هو عملية تحويل الكلام إلى نص. إنه أحد أكثر المهام الصوتية ؎يوعًا ويرجع ذلك جز؊يًا إلى أن الكلام وسيلة طؚيعية للتواصل الؚ؎ري. واليوم، يتم تضمين أن؞مة ASR في منتجات التقنية "الذكية" مثل مكؚرات الصوت والهواتف والسيارات. يمكننا أن نطلؚ من مساعدينا الافتراضيين ت؎غيل الموسيقى، وضؚط التذكيرات، وإخؚارنا ؚأحوال الطقس. ولكن أحد التحديات الر؊يسية التي ساعدت نماذج المحولات (Transformer) في التغلؚ عليها هو التعامل مع اللغات منخفضة الموارد. فمن خلال التدريؚ المسؚق على كميات كؚيرة من ؚيانات الصوتية، يُمكن ضؚط النموذج ؚدقة (Fine-tuning) ؚاستخدام ساعة واحدة فقط من ؚيانات الكلام المُوسم في لغة منخفضة الموارد إلى نتا؊ج عالية الجودة مقارنة ؚأن؞مة ASR الساؚقة التي تم تدريؚها على ؚيانات موسومة أكثر ØšÙ€ 100 مرة. 
```py >>> from transformers import pipeline >>> transcriber = pipeline(task="automatic-speech-recognition", model="openai/whisper-small") >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'} ``` ## ر؀ية الحاسؚ كانت إحدى أوا؊ل مهام ر؀ية الحاسؚ وأنجحها هى التعرف على صور أرقام الرموز الؚريدية ؚاستخدام [؎ؚكة عصؚية تلافيفية (CNN)](glossary#convolution). تتكون الصورة من وحدات ؚيكسل، ولكل ؚكسل قيمة رقمية. وهذا يجعل من السهل تمثيل صورة كمصفوفة من قيم الؚكسل. يصف كل مزيج معين من قيم الؚكسل ألوان الصورة. هناك طريقتان عامتان يمكن من خلالهما حل مهام ر؀ية الحاسؚ: 1. استخدام الالتفافات (Convolutions) لتعلم الميزات الهرمية للصورة ؚدءًا من الميزات منخفضة المستوى وصولًا إلى الأ؎ياء المجردة عالية المستوى. 2. تقسيم الصورة إلى أجزاء واستخدام نموذج المحولات (Transformer) ليتعلم تدريجياً كيف ترتؚط كل جزء صورة ؚؚعضها الؚعض لت؎كيل صورة. على عكس النهج ا التصاعدي (Bottom-Up) الذي تفضله ال؎ؚكات العصؚية التلافيفية CNN، هذا ÙŠØŽØšÙ‡ إلى حد ما الؚدء ؚصورة ضؚاؚية ثم جعلها أوضح تدريجيًا. ### تصنيف الصور يقوم تصنيف الصور (Image Classification) ؚوضع علامة على صورة كاملة من مجموعة محددة مسؚقًا من الف؊ات. مثل مع؞م مهام التصنيف، هناك العديد من التطؚيقات العملية لتصنيف الصور، والتي ت؎مل: * الرعاية الصحية: تصنيف الصور الطؚية للك؎ف عن الأمراض أو مراقؚة صحة المريض * الؚي؊ة: تصنيف صور الأقمار الصناعية لرصد إزالة الغاؚات، أو إؚلاغ إدارة الأراضي الؚرية أو اكت؎اف حرا؊ق الغاؚات * الزراعة: تصنيفر المحاصيل لمراقؚة صحة النؚات أو صور الأقمار الصناعية لمراقؚة استخدام الأراضي * علم الؚي؊ة: تصنيف صور الأنواع الحيوانية أو النؚاتية لرصد أعداد الكا؊نات الحية أو تتؚع الأنواع المهددة ؚالانقراض ```py >>> from transformers import pipeline >>> classifier = pipeline(task="image-classification") >>> preds = classifier( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> print(*preds, sep="\n") {'score': 0.4335, 'label': 'lynx, catamount'} {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'} {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'} {'score': 0.0239, 'label': 'Egyptian cat'} {'score': 0.0229, 'label': 'tiger cat'} ``` ### ك؎ف الأجسام على عكس تصنيف الصور، يقوم ك؎ف الأجسام (Object Detection) ؚتحديد عدة أجسام داخل صورة ومواضع هذه الأجسام في صورة (يحددها مرؚع الإحاطة). ؚعض تطؚيقات ك؎ف الأجسام ت؎مل: * المركؚات ذاتية القيادة: اكت؎اف أجسام المرورية اليومية مثل المركؚات الأخرى والم؎اة وإ؎ارات المرور * الاست؎عار عن ُؚعد: مراقؚة الكوارث، والتخطيط الحضري، والتنؚ؀ ؚالطقس * اكت؎اف العيوؚ: اكت؎اف ال؎قوق أو الأضرار الهيكلية في المؚاني، وعيوؚ التصنيع ```py >>> from transformers import pipeline >>> detector = pipeline(task="object-detection") >>> preds = detector( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"], "box": pred["box"]} for pred in preds] >>> preds [{'score': 0.9865, 'label': 'cat', 'box': {'xmin': 178, 'ymin': 154, 'xmax': 882, 'ymax': 598}}] ``` ### تجز؊ة الصور تجز؊ة الصورة (Image Segmentation) هي مهمة على مستوى الؚكسل تقوم ؚتخصيص كل ؚكسل في صورة لف؊ة معينة. إنه يختلف عن ك؎ف الأجسام، والذي يستخدم مرؚعات الإحاطة (Bounding Boxes) لتصنيف والتنؚ؀ ؚالأجسام في الصورة لأن التجز؊ة أكثر دقة. 
يمكن لتجز؊ة الصور اكت؎اف الأجسام على مستوى الؚكسل. هناك عدة أنواع من تجز؊ة الصور: * تجز؊ة مثيلات (Instance Segmentation): ؚالإضافة إلى تصنيف ف؊ة كا؊ن، فإنها تُصنّف أيضًا كل مثيل (Instance) مميز لكا؊ن ("الكلؚ-1"، "الكلؚ-2") * التجز؊ة الؚانورامية (Panoptic Segmentation): مزيج من التجز؊ة الدلالية (Semantic Segmentation) وتجز؊ة المثيلات؛ فهو تُصنّف كل ؚكسل مع ف؊ة دلالية **و** كل مثيل مميز لكا؊ن تُعد مهام تجز؊ة الصور مفيدة في المركؚات ذاتية القيادة على إن؎اء خريطة على مستوى الؚكسل للعالم من حولها حتى تتمكن من التنقل ؚأمان حول الم؎اة والمركؚات الأخرى. كما أنها مفيدة للتصوير الطؚي، حيث يمكن للدقة العالية لهذ المهمة أن تساعد في تحديد الخلايا غير الطؚيعية أو خصا؊ص الأعضاء. يمكن أيضًا استخدام تجز؊ة الصور في التجارة الإلكترونية لتجرؚة الملاؚس افتراضيًا أو إن؎اء تجارؚ الواقع المُعزز من خلال تراكؚ الأجسام في العالم الحقيقي من خلال الكاميرا الهاتف الخاصة ØšÙƒ. ```py >>> from transformers import pipeline >>> segmenter = pipeline(task="image-segmentation") >>> preds = segmenter( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> print(*preds, sep="\n") {'score': 0.9879, 'label': 'LABEL_184'} {'score': 0.9973, 'label': 'snow'} {'score': 0.9972, 'label': 'cat'} ``` ### تقدير العمق يقوم تقدير العمق (Depth Estimation) ؚالتنؚ؀ ؚمسافة كل ؚكسل في صورة من الكاميرا. تُعد هذه المهمة لر؀ية الحاسؚ هذه مهمة ؚ؎كل خاص لفهم وإعادة ؚناء الم؎هد. فعلى سؚيل المثال، في السيارات ذاتية القيادة، تحتاج المركؚات إلى فهم مدى ُؚعد الأجسام مثل الم؎اة ولافتات المرور والمركؚات الأخرى لتجنؚ العقؚات والاصطدامات. تساعد معلومات العمق أيضًا في ؚناء التمثيلات ثلاثية الأؚعاد من الصور ثنا؊ية الأؚعاد ويمكن استخدامها لإن؎اء تمثيلات ثلاثية الأؚعاد عالية الجودة للهياكل الؚيولوجية أو المؚاني. هناك نهجان لتقدير العمق: * التصوير المجسم (Stereo): يتم تقدير العمق عن طريق مقارنة صورتين لنفس الصورة من زوايا مختلفة قليلاً. * التصوير الأحادي (Monocular): يتم تقدير العمق من صورة واحدة. ```py >>> from transformers import pipeline >>> depth_estimator = pipeline(task="depth-estimation") >>> preds = depth_estimator( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) ``` ## معالجة اللغات الطؚيعية تُعد مهام معالجة اللغة الطؚيعية (NLP) من ؚين أكثر أنواع المهام ؎يوعًا ن؞رًا لأن النص هو وسيلة طؚيعية لنا للتواصل. ولكي يتمكن النموذج من فهم النص، يجؚ أولًا تحويله إلى صيغة رقمية. وهذا يعني تقسيم سلسلة النص إلى كلمات أو مقاطع كلمات منفصلة (رموز - Tokens)، ثم تحويل هذه الرموز إلى أرقام. ونتيجة لذلك، يمكنك تمثيل سلسلة من النص كتسلسل من الأرقام، وؚمجرد حصولك على تسلسل من الأرقام، يمكن إدخاله إلى نموذج لحل جميع أنواع مهام معالجة اللغة الطؚيعية! ### تصنيف النصوص تمامًا مثل مهام التصنيف في أي مجال آخر، يقوم تصنيف النصوص (Text Classification) ؚتصنيف سلسلة نصية يمكن أن تكون جملة أو فقرة أو مستند) إلى ف؊ة محددة مسؚقًا. هناك العديد من التطؚيقات العملية لتصنيف النصوص، والتي ت؎مل: * تحليل الم؎اعر (Sentiment Analysis): تصنيف النص وفقًا لمعيار معين مثل `الإيجاؚية` أو `السلؚية` والتي يمكن أن تُعلم وتدعم عملية صنع القرار في مجالات مثل السياسة والتمويل والتسويق * تصنيف المحتوى (Content Classification): تصنيف النص وفقًا لؚعض الموضوعات للمساعدة في تن؞يم وتصفية المعلومات في الأخؚار وموجزات الوسا؊ط الاجتماعية (`الطقس`، `الرياضة`، `التمويل`، إلخ). 
```py >>> from transformers import pipeline >>> classifier = pipeline(task="sentiment-analysis") >>> preds = classifier("Hugging Face is the best thing since sliced bread!") >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.9991, 'label': 'POSITIVE'}] ``` ### تصنيف الرموز في أي مهمة من مهام معالجة اللغة الطؚيعية NLP، تتم معالجة النص مسؚقًا عن طريق تقسيمه إلى كلمات أو مقاطع كلمات فردية تُعرف ؚاسم [الرموز](glossary#token). يقوم تصنيف الرموز (Token Classification) ؚتخصيص تصنيف لكل رمز من مجموعة محددة مسؚقًا من التصنيفات. هناك نوعان ؎ا؊عان من تصنيف الرموز: * التعرف على الكيانات المسماة (NER): تصنيف الرموز وفقًا لف؊ة الكيان مثل المن؞مة أو ال؎خص أو الموقع أو التاريخ. يعد NER ؎ا؊عًا ؚ؎كل خاص في الإعدادات الطؚية الحيوية، حيث يُمكنه تصنيف الجينات والؚروتينات وأسماء الأدوية. * ترميز الأجزاء اللغوية (POS): تصنيف الرموز وفقًا للدورها النحوي مثل الاسم أو الفعل أو الصفة. POS مفيد لمساعدة أن؞مة الترجمة على فهم كيفية اختلاف كلمتين متطاؚقتين نحويًا (مثل كلمة "عَلَمَ" كاسم و "عَلِمَ" كفعل). ```py >>> from transformers import pipeline >>> classifier = pipeline(task="ner") >>> preds = classifier("Hugging Face is a French company based in New York City.") >>> preds = [ ... { ... "entity": pred["entity"], ... "score": round(pred["score"], 4), ... "index": pred["index"], ... "word": pred["word"], ... "start": pred["start"], ... "end": pred["end"], ... } ... for pred in preds ... ] >>> print(*preds, sep="\n") {'entity': 'I-ORG', 'score': 0.9968, 'index': 1, 'word': 'Hu', 'start': 0, 'end': 2} {'entity': 'I-ORG', 'score': 0.9293, 'index': 2, 'word': '##gging', 'start': 2, 'end': 7} {'entity': 'I-ORG', 'score': 0.9763, 'index': 3, 'word': 'Face', 'start': 8, 'end': 12} {'entity': 'I-MISC', 'score': 0.9983, 'index': 6, 'word': 'French', 'start': 18, 'end': 24} {'entity': 'I-LOC', 'score': 0.999, 'index': 10, 'word': 'New', 'start': 42, 'end': 45} {'entity': 'I-LOC', 'score': 0.9987, 'index': 11, 'word': 'York', 'start': 46, 'end': 50} {'entity': 'I-LOC', 'score': 0.9992, 'index': 12, 'word': 'City', 'start': 51, 'end': 55} ``` ### الإجاؚة على الأس؊لة تُعدّ مهمة الإجاؚة عن الأس؊لة (Question Answering) مهمة أخرى على مستوى الرموز (Token-Level) تُرجع إجاؚة لس؀ال ما، وقد تعتمد هذه الإجاؚة على سياق (في النطاق المفتوح - Open-Domain) أو لا تعتمد على سياق (في النطاق المغلق - Closed-Domain). تحدث هذه المهمة عندما نسأل مساعدًا افتراضيًا عن ؎يء ما، مثل معرفة ما إذا كان مطعمٌ ما مفتوحًا. يمكن أن تُقدّم هذه المهمة أيضًا دعمًا للعملاء أو دعمًا تقنيًا، كما تُساعد محركات الؚحث في استرجاع المعلومات ذات الصلة التي نؚحث عنها. هناك نوعان ؎ا؊عان من الإجاؚة على الأس؊لة: * الاستخراجية (Extractive): ؚالن؞ر إلى س؀ال وسياق مُعيّن، فإن الإجاؚة هي مقطع نصيّ مُستخرج من السياق الذي يُحلّله النموذج. * التجريدية (Abstractive): ؚالن؞ر إلى س؀ال وسياق مُعيّن، يتم إن؎اء الإجاؚة من السياق؛ يتعامل نهج [`Text2TextGenerationPipeline`] مع هذا النهج ؚدلاً من [`QuestionAnsweringPipeline`] الموضح أدناه ```py >>> from transformers import pipeline >>> question_answerer = pipeline(task="question-answering") >>> preds = question_answerer( ... question="What is the name of the repository?", ... context="The name of the repository is huggingface/transformers", ... ) >>> print( ... f"score: {round(preds['score'], 4)}, start: {preds['start']}, end: {preds['end']}, answer: {preds['answer']}" ... ) score: 0.9327, start: 30, end: 54, answer: huggingface/transformers ``` ### التلخيص ين؎؊ التلخيص (Summarization) نسخة مختصرة من نص طويل مع محاولة الحفا؞ على مع؞م معنى النص الأصلي. 
التلخيص هو مهمة تسلسل إلى تسلسل(Sequence-to-Sequence)؛؛ فهو تُنتج تسلسلًا نصيًا أقصر من النص المُدخل. هناك الكثير من المستندات الطويلة التي يمكن تلخيصها لمساعدة القراء على فهم النقاط الر؊يسية ؚسرعة. م؎اريع القوانين والوثا؊ق القانونية والمالية وؚراءات الاختراع والأوراق العلمية هي مجرد أمثلة قليلة للوثا؊ق التي يمكن تلخيصها لتوفير وقت القراء وخدمة كمساعد للقراءة. مثل الإجاؚة على الأس؊لة، هناك نوعان من التلخيص: * الاستخراجية (Extractive): تحديد واستخراج أهم الجمل من النص الأصلي * التجريدي (Abstractive): إن؎اء ملخص مستهدف (الذي قد يتضمن كلمات جديدة غير موجودة في النص الأصلي) انطلاقًا من النص الأصلي؛ يستخدم نهج التلخيص التجريدي [`SummarizationPipeline`] ```py >>> from transformers import pipeline >>> summarizer = pipeline(task="summarization") >>> summarizer( ... "In this work, we presented the Transformer, the first sequence transduction model based entirely on attention, replacing the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention. For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers. On both WMT 2014 English-to-German and WMT 2014 English-to-French translation tasks, we achieve a new state of the art. In the former task our best model outperforms even all previously reported ensembles." ... ) [{'summary_text': ' The Transformer is the first sequence transduction model based entirely on attention . It replaces the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention . For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers .'}] ``` ### الترجمة تحوّل الترجمة تسلسل نص ؚلغة إلى لغة أخرى. من المهم مساعدة الأ؎خاص من خلفيات مختلفة على التواصل مع ؚعضهم الؚعض، ومساعدة المحتوى على الوصول إلى جمهور أوسع، وحتى أن يكون أداة تعليمية لمساعدة الأ؎خاص على تعلم لغة جديدة. إلى جانؚ التلخيص، تعد الترجمة مهمة من نوع تسلسل إلى تسلسل، حيث يتلقى النموذج تسلسلًا مُدخلًا ويُعيد تسلسلًا مُخرَجًا مُستهدفًا. في الأيام الأولى، كانت نماذج الترجمة في الغالؚ أحادية اللغة، ولكن م؀خرًا، كان هناك اهتمام متزايد ؚالنماذج متعددة اللغات التي يمكنها الترجمة ؚين العديد من أزواج اللغات. ```py >>> from transformers import pipeline >>> text = "translate English to French: Hugging Face is a community-based open-source platform for machine learning." >>> translator = pipeline(task="translation", model="google-t5/t5-small") >>> translator(text) [{'translation_text': "Hugging Face est une tribune communautaire de l'apprentissage des machines."}] ``` ### نمذجة اللغة نمذجة اللغة (Language Modeling) هي مهمة التنؚ؀ ؚالكلمة التالية في تسلسل نصي. لقد أصؚح مهمة NLP ؎ا؊عة للغاية لأن النموذج اللغوي المسؚق التدريؚ يمكن أن يتم ضؚطه ؚ؎كل دقيق للعديد من مهام الأخرى. في الآونة الأخيرة، كان هناك الكثير من الاهتمام ؚنماذج اللغة الكؚيرة (LLMs) التي توضح التعلم من الصفر أو من عدد قليل من الأمثلة (Zero-shot or Few-shot Learning). وهذا يعني أن النموذج يمكنه حل المهام التي لم يتم تدريؚه عليها ؚ؎كل صريح! يمكن استخدام نماذج اللغة لإن؎اء نص سلس ومقنع، على الرغم من أنه يجؚ أن تكون حذرًا لأن النص قد لا يكون دا؊مًا دقيقًا. هناك نوعان من نمذجة اللغة: * السؚؚية(Causal): هدف النموذج هو التنؚ؀ ؚالرمز (Token) التالي في التسلسل، ويتم إخفاء الرموز المستقؚلية (Masking). ```py >>> from transformers import pipeline >>> prompt = "Hugging Face is a community-based open-source platform for machine learning." 
>>> generator = pipeline(task="text-generation") >>> generator(prompt) # doctest: +SKIP ``` * المقنّع (Masked): هدف النموذج هو التنؚ؀ ؚرمز مُخفيّ ضمن التسلسل مع الوصول الكامل إلى الرموز الأخرى في التسلسل ```py >>> text = "Hugging Face is a community-based open-source <mask> for machine learning." >>> fill_mask = pipeline(task="fill-mask") >>> preds = fill_mask(text, top_k=1) >>> preds = [ ... { ... "score": round(pred["score"], 4), ... "token": pred["token"], ... "token_str": pred["token_str"], ... "sequence": pred["sequence"], ... } ... for pred in preds ... ] >>> preds [{'score': 0.2236, 'token': 1761, 'token_str': ' platform', 'sequence': 'Hugging Face is a community-based open-source platform for machine learning.'}] ``` ## متعدد الوسا؊ط: تتطلؚ المهام متعددة الوسا؊ط (Multimodal) من النموذج معالجة وسا؊ط ؚيانات متعددة (نص أو صورة أو صوت أو فيديو) لحل م؎كلة معينة. يعد وصف الصورة (Image Captioning) مثالاً على مهمة متعددة الوسا؊ط حيث يأخذ النموذج صورة كمدخل وينتج تسلسل نصيًا يصف الصورة أو ؚعض خصا؊صها. على الرغم من أن النماذج متعددة الوسا؊ط تعمل مع أنواع أو وسا؊ط ؚيانات مختلفة، إلا أن خطوات المعالجة المسؚقة تساعد النموذج داخليًا على تحويل جميع أنواع الؚيانات إلى متجهات تضمين (Embeddings) (متجهات أو قوا؊م من الأرقام التي تحتوي على معلومات ذات معنى حول الؚيانات). ؚالنسؚة لمهمة مثل وصف الصورة، يتعلم النموذج العلاقات ؚين متجهات تضمين الصور ومتجهات تضمين النص. ### الإجاؚة على أس؊لة المستندات: الإجاؚة على أس؊لة المستندات (Document Question Answering) هي مهمة تقوم ؚالإجاؚة على أس؊لة اللغة الطؚيعية من مستند مُعطى. على عكس مهمة الإجاؚة على الأس؊لة على مستوى الرموز (Token-Level) التي تأخذ نصًا كمدخل، فإن الإجاؚة على أس؊لة المستندات تأخذ صورة لمستند كمدخل ؚالإضافة إلى س؀ال هذا حول المستند وتعيد الإجاؚة. يمكن استخدام الإجاؚة على أس؊لة المستندات لتفسير المستندات المُنسّقة واستخراج المعلومات الر؊يسية منها. في المثال أدناه، يمكن استخراج المؚلغ الإجمالي والمؚلغ المُسترد من إيصال الدفع.. ```py >>> from transformers import pipeline >>> from PIL import Image >>> import requests >>> url = "https://huggingface.co/datasets/hf-internal-testing/example-documents/resolve/main/jpeg_images/2.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> doc_question_answerer = pipeline("document-question-answering", model="magorshunov/layoutlm-invoices") >>> preds = doc_question_answerer( ... question="ما هو المؚلغ الإجمالي؟", ... image=image, ... ) >>> preds [{'score': 0.8531, 'answer': '17,000', 'start': 4, 'end': 4}] ``` نأمل أن تكون هذه الصفحة قد زودتك ؚؚعض المعلومات الأساسية حول جميع أنواع المهام في كل طريقة وأهمية كل منها العملية. في القسم التالي، ستتعلم كيف تعمل مكتؚة 🀗 Transformers لحل هذه المهام.
transformers/docs/source/ar/task_summary.md/0
{ "file_path": "transformers/docs/source/ar/task_summary.md", "repo_id": "transformers", "token_count": 14746 }
# استك؎اف الأخطاء وإصلاحها تحدث الأخطاء أحيانًا، لكننا هنا للمساعدة! يغطي هذا الدليل ؚعض الم؎كلات الأكثر ؎يوعًا التي واجهناها وكيفية حلها. مع ذلك، لا يُقصد ؚهذا الدليل أن يكون مجموعة ؎املة لكل م؎كلات 🀗 Transformers. لمزيد من المساعدة في استك؎اف م؎كلتك وإصلاحها، جرؚ ما يلي: <Youtube id="S2EEG3JIt2A"/> 1. اطلؚ المساعدة على [المنتديات](https://discuss.huggingface.co/). هناك ف؊ات محددة يمكنك ن؎ر س؀الك فيها، مثل [المؚتد؊ين](https://discuss.huggingface.co/c/beginners/5) أو [🀗 Transformers](https://discuss.huggingface.co/c/transformers/9). تأكد من كتاؚة من؎ور جيد وواضح على المنتدى مع ؚعض التعليمات الؚرمجية القاؚلة للتكرار لزيادة احتمالية حل م؎كلتك! <Youtube id="_PAli-V4wj0"/> 2. قم ؚإن؎اء [م؎كلة](https://github.com/huggingface/transformers/issues/new/choose) في مستودع 🀗 Transformers إذا كانت هناك م؎كلة متعلقة ؚالمكتؚة. حاول تضمين أكؚر قدر ممكن من المعلومات التي تصف الم؎كلة لمساعدتنا في معرفة ما هو الخطأ وكيفية إصلاحه. 3. تحقق من دليل [الترحيل](migration) إذا كنت تستخدم إصدارًا أقدم من مكتؚة 🀗 Transformers حيث تم إدخال ؚعض التغييرات المهمة ؚين الإصدارات. للحصول على مزيد من التفاصيل حول استك؎اف الأخطاء وإصلاحها والحصول على المساعدة، راجع [الفصل 8](https://huggingface.co/course/chapter8/1?fw=pt) من دورة Hugging Face. ## ؚي؊ات جدار الحماية ؚعض وحدات معالجة الرسومات (GPU) على السحاؚة وإعدادات ال؎ؚكة الداخلية محمية ؚجدار حماية من الاتصالات الخارجية، مما ي؀دي إلى حدوث خطأ في الاتصال. عندما تحاول تعليمات الؚرنامج النصي تنزيل أوزان النموذج أو مجموعات الؚيانات، سيتوقف التنزيل ثم ينتهي ؚخطأ مثل: ``` ValueError: Connection error, and we cannot find the requested files in the cached path. Please try again or make sure your Internet connection is on. ``` في هذه الحالة، يجؚ محاولة ت؎غيل 🀗 Transformers في [وضع عدم الاتصال](installation#offline-mode) لتجنؚ خطأ الاتصال. ## CUDA نفاد الذاكرة يمكن أن يكون تدريؚ النماذج الكؚيرة التي تحتوي على ملايين المعلمات أمرًا صعًؚا ؚدون الأجهزة المناسؚة. أحد الأخطاء ال؎ا؊عة التي قد تواجهها عند نفاد ذاكرة GPU هو: ``` CUDA out of memory. Tried to allocate 256.00 MiB (GPU 0; 11.17 GiB total capacity; 9.70 GiB already allocated; 179.81 MiB free; 9.85 GiB reserved in total by PyTorch) ``` فيما يلي ؚعض الحلول المحتملة التي يمكنك تجرؚتها لتقليل استخدام الذاكرة: - قلل من قيمة [`per_device_train_batch_size`](main_classes/trainer#transformers.TrainingArguments.per_device_train_batch_size) في [`TrainingArguments`]. - حاول استخدام [`gradient_accumulation_steps`](main_classes/trainer#transformers.TrainingArguments.gradient_accumulation_steps) في [`TrainingArguments`] لزيادة حجم الدُفعة ؚ؎كل فعال. <Tip> راجع دليل [الأداء](performance) لمزيد من التفاصيل حول تقنيات توفير الذاكرة. </Tip> ## عدم القدرة على تحميل نموذج TensorFlow محفو؞ تقوم طريقة TensorFlow [model.save](https://www.tensorflow.org/tutorials/keras/save_and_load#save_the_entire_model) ؚحف؞ النموذج ؚالكامل - الهندسة المعمارية، الأوزان، تكوين التدريؚ - في ملف واحد. ومع ذلك، عند تحميل ملف النموذج مرة أخرى، قد تواجه خطأ لأن مكتؚة 🀗 Transformers قد لا تقوم ؚتحميل جميع الكا؊نات المتعلقة ØšÙ€ TensorFlow في ملف النموذج. 
لتجنؚ الم؎كلات المتعلقة ؚحف؞ وتحميل نماذج TensorFlow، نوصي ؚما يلي: - احف؞ أوزان النموذج كملف `h5` ؚاستخدام [`model.save_weights`](https://www.tensorflow.org/tutorials/keras/save_and_load#save_the_entire_model) ثم أعد تحميل النموذج ؚاستخدام [`~TFPreTrainedModel.from_pretrained`]: ```python >>> from transformers import TFPreTrainedModel >>> from tensorflow import keras >>> model.save_weights("some_folder/tf_model.h5") >>> model = TFPreTrainedModel.from_pretrained("some_folder") ``` - احف؞ النموذج ؚاستخدام [`~TFPretrainedModel.save_pretrained`] وقم ؚتحميله مرة أخرى ؚاستخدام [`~TFPreTrainedModel.from_pretrained`]: ```python >>> from transformers import TFPreTrainedModel >>> model.save_pretrained("path_to/model") >>> model = TFPreTrainedModel.from_pretrained("path_to/model") ``` ## ImportError خطأ ؎ا؊ع آخر قد تواجهه، خاصة إذا كان نموذجًا تم إصداره حديثًا، هو `ImportError`: ``` ImportError: cannot import name 'ImageGPTImageProcessor' from 'transformers' (unknown location) ``` ؚالنسؚة لأنواع الأخطاء هذه، تحقق من أن لديك أحدث إصدار من مكتؚة Hugging Face Transformers مثؚتًا للوصول إلى أحدث النماذج: ```bash pip install transformers --upgrade ``` ## خطأ CUDA: تم ت؎غيل التأكيد على جانؚ الجهاز في ؚعض الأحيان، قد تواجه خطأ CUDA عامًا حول خطأ في كود الجهاز. ``` RuntimeError: CUDA error: device-side assert triggered ``` يجؚ عليك محاولة ت؎غيل الكود على وحدة المعالجة المركزية (CPU) أولاً للحصول على رسالة خطأ أكثر دقة. أضف متغير الؚي؊ة التالي في ؚداية كودك للتؚديل إلى وحدة المعالجة المركزية: ```python >>> import os >>> os.environ["CUDA_VISIBLE_DEVICES"] = "" ``` الخيار الآخر هو الحصول على تتؚع مكدس أفضل من GPU. أضف متغير الؚي؊ة التالي في ؚداية كودك للحصول على تتؚع المكدس للإ؎ارة إلى مصدر الخطأ: ```python >>> import os >>> os.environ["CUDA_LAUNCH_BLOCKING"] = "1" ``` ## إخراج غير صحيح عند عدم إخفاء رموز الح؎و في ؚعض الحالات، قد يكون `hidden_state` غير صحيحة إذا تضمنت `input_ids` رموز Ø­ØŽÙˆ. ولإثؚات ذلك، قم ؚتحميل نموذج ومجزىء لغوى. يمكنك الوصول إلى `pad_token_id` للنموذج لمعرفة قيمته. قد تكون `pad_token_id` `None` لؚعض النماذج، ولكن يمكنك دا؊مًا تعيينها يدويًا. ```python >>> from transformers import AutoModelForSequenceClassification >>> import torch >>> model = AutoModelForSequenceClassification.from_pretrained("google-bert/bert-base-uncased") >>> model.config.pad_token_id 0 ``` يوضح المثال التالي المُخرجات ؚدون إخفاء رموز الح؎و: ```python >>> input_ids = torch.tensor([[7592, 2057, 2097, 2393, 9611, 2115], [7592, 0, 0, 0, 0, 0]]) >>> output = model(input_ids) >>> print(output.logits) tensor([[ 0.0082, -0.2307], [ 0.1317, -0.1683]], grad_fn=<AddmmBackward0>) ``` هنا المُخرجات الفعلية للتسلسل الثاني: ```python >>> input_ids = torch.tensor([[7592]]) >>> output = model(input_ids) >>> print(output.logits) tensor([[-0.1008, -0.4061]], grad_fn=<AddmmBackward0>) ``` يجؚ عليك في مع؞م الوقت توفير `attention_mask` للنموذج لتجاهل رموز الح؎و لتجنؚ هذا الخطأ الصامت. الآن يتطاؚق مُخرجات التسلسل الثاني مع مُخرجاته الفعلية: <Tip> ؚ؎كل افتراضي، ين؎؊ مجزىء النصوص `attention_mask` لك استنادًا إلى إعدادات المجزىء المحدد. </Tip> ```python >>> attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0]]) >>> output = model(input_ids, attention_mask=attention_mask) >>> print(output.logits) tensor([[ 0.0082, -0.2307], [-0.1008, -0.4061]], grad_fn=<AddmmBackward0>) ``` لا ين؎؊ 🀗 Transformers تلقا؊يًا `attention_mask` لإخفاء رمز الح؎و إذا تم توفيره لأن: - ؚعض النماذج ليس لها رمز Ø­ØŽÙˆ. - ؚالنسؚة لؚعض الاستخدامات، يريد المستخدمون أن ينتؚه النموذج إلى رمز الح؎و. 
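وبدلاً من إنشاء `attention_mask` يدويًا كما في المثال السابق، يكفي في معظم الحالات تمرير الجمل إلى المُجزئ اللغوي مع تفعيل الحشو (padding) لكي يُنشئ القناع المناسب تلقائيًا. فيما يلي مثال توضيحي مبسط يفترض استخدام نقطة التفتيش `google-bert/bert-base-uncased` نفسها المستخدمة أعلاه:

```python
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification

>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> model = AutoModelForSequenceClassification.from_pretrained("google-bert/bert-base-uncased")

>>> # with padding enabled, the tokenizer builds the matching attention_mask automatically
>>> inputs = tokenizer(["Hello, how are you?", "Hi"], padding=True, return_tensors="pt")
>>> list(inputs.keys())
['input_ids', 'token_type_ids', 'attention_mask']

>>> # unpacking the dict passes attention_mask along with input_ids, so padding tokens are ignored
>>> output = model(**inputs)
```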
## ValueError: ف؊ة التكوين غير المعترف ؚها XYZ لهذا النوع من AutoModel ؚ؎كل عام، نوصي ؚاستخدام ف؊ة [`AutoModel`] لتحميل النسخ المدرؚة مسؚقًا من النماذج. يمكن لهذه الف؊ة أن تستنتج وتُحمل تلقا؊يًا الؚنية الصحيحة من نسخ معينة ؚناءً على التكوين. إذا رأيت هذا الخطأ `ValueError` عند تحميل نموذج من نسخة، فهذا يعني أن الف؊ة التلقا؊ية (Auto) لم تتمكن من العثور على خريطة من التكوين في نقطة التفتي؎ المعطاة إلى نوع النموذج الذي تُحاول تحميله. وغالًؚا ما يحدث هذا عندما لا تدعم نقطة التفتي؎ مهمة معينة. على سؚيل المثال، سترى هذا الخطأ في المثال التالي لأنه لا يوجد GPT2 للإجاؚة على الأس؊لة: ```py >>> from transformers import AutoProcessor, AutoModelForQuestionAnswering >>> processor = AutoProcessor.from_pretrained("openai-community/gpt2-medium") >>> model = AutoModelForQuestionAnswering.from_pretrained("openai-community/gpt2-medium") ValueError: Unrecognized configuration class <class 'transformers.models.gpt2.configuration_gpt2.GPT2Config'> for this kind of AutoModel: AutoModelForQuestionAnswering. Model type should be one of AlbertConfig, BartConfig, BertConfig, BigBirdConfig, BigBirdPegasusConfig, BloomConfig, ... ```
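أسهل طريقة لتجاوز هذا الخطأ هي اختيار فئة تلقائية (Auto) تتوافق مع المهام التي تدعمها نقطة التفتيش فعلاً. فبما أن GPT-2 نموذج لتوليد النصوص، يمكن تحميله باستخدام [`AutoModelForCausalLM`]، كما في هذا المثال التوضيحي المبسط:

```py
>>> from transformers import AutoTokenizer, AutoModelForCausalLM

>>> # GPT-2 is a text-generation model, so the causal-LM auto class can resolve its configuration
>>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2-medium")
>>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2-medium")
```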
transformers/docs/source/ar/troubleshooting.md/0
{ "file_path": "transformers/docs/source/ar/troubleshooting.md", "repo_id": "transformers", "token_count": 5400 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Schnellstart [[open-in-colab]] Mit 🀗 Transformers können Sie sofort loslegen! Verwenden Sie die [`pipeline`] fÃŒr schnelle Inferenz und laden Sie schnell ein vortrainiertes Modell und einen Tokenizer mit einer [AutoClass](./model_doc/auto), um Ihre Text-, Bild- oder Audioaufgabe zu lösen. <Tip> Alle in der Dokumentation vorgestellten Codebeispiele haben oben links einen Umschalter fÃŒr PyTorch und TensorFlow. Wenn nicht, wird erwartet, dass der Code fÃŒr beide Backends ohne Änderungen funktioniert. </Tip> ## Pipeline [`pipeline`] ist der einfachste Weg, ein vortrainiertes Modell fÃŒr eine bestimmte Aufgabe zu verwenden. <Youtube id="tiZFewofSLM"/> Die [`pipeline`] unterstÃŒtzt viele gÀngige Aufgaben: **Text**: * Stimmungsanalyse: Klassifizierung der PolaritÀt eines gegebenen Textes. * Textgenerierung (auf Englisch): Generierung von Text aus einer gegebenen Eingabe. * Name-Entity-Recognition (NER): Kennzeichnung jedes Worts mit der EntitÀt, die es reprÀsentiert (Person, Datum, Ort usw.). * Beantwortung von Fragen: Extrahieren der Antwort aus dem Kontext, wenn ein gewisser Kontext und eine Frage gegeben sind. * Fill-mask: AusfÃŒllen von LÃŒcken in einem Text mit maskierten Wörtern. * Zusammenfassung: Erstellung einer Zusammenfassung einer langen Text- oder Dokumentensequenz. * Übersetzung: Übersetzen eines Textes in eine andere Sprache. * Merkmalsextraktion: Erstellen einer Tensordarstellung des Textes. **Bild**: * Bildklassifizierung: Klassifizierung eines Bildes. * Bildsegmentierung: Klassifizierung jedes Pixels in einem Bild. * Objekterkennung: Erkennen von Objekten innerhalb eines Bildes. **Audio**: * Audioklassifizierung: Zuweisung eines Labels zu einem bestimmten Audiosegment. * Automatische Spracherkennung (ASR): Transkription von Audiodaten in Text. <Tip> FÃŒr mehr Details ÃŒber die [`pipeline`] und assoziierte Aufgaben, schauen Sie in die Dokumentation [hier](./main_classes/pipelines). </Tip> ### Verwendung der Pipeline Im folgenden Beispiel werden Sie die [`pipeline`] fÃŒr die Stimmungsanalyse verwenden. Installieren Sie die folgenden AbhÀngigkeiten, falls Sie dies nicht bereits getan haben: <frameworkcontent> <pt> ```bash pip install torch ``` </pt> <tf> ```bash pip install tensorflow ``` </tf> </frameworkcontent> Importieren sie die [`pipeline`] und spezifizieren sie die Aufgabe, welche sie lösen möchten: ```py >>> from transformers import pipeline >>> classifier = pipeline("sentiment-analysis") ``` Die Pipeline lÀdt ein standardmÀßiges [vortrainiertes Modell](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english) und einen Tokenizer fÃŒr die Stimmungs-Analyse herunter und speichert sie. 
Jetzt können Sie den "Klassifikator" auf Ihren Zieltext anwenden: ```py >>> classifier("We are very happy to show you the 🀗 Transformers library.") [{'label': 'POSITIVE', 'score': 0.9998}] ``` For more than one sentence, pass a list of sentences to the [`pipeline`] which returns a list of dictionaries: ```py >>> results = classifier(["We are very happy to show you the 🀗 Transformers library.", "We hope you don't hate it."]) >>> for result in results: ... print(f"label: {result['label']}, with score: {round(result['score'], 4)}") label: POSITIVE, with score: 0.9998 label: NEGATIVE, with score: 0.5309 ``` Die [`pipeline`] kann auch ÃŒber einen ganzen Datensatz iterieren. Starten wir mit der Installation der [🀗 Datasets](https://huggingface.co/docs/datasets/) Bibliothek: ```bash pip install datasets ``` Erstellen wir eine [`pipeline`] mit der Aufgabe die wir lösen und dem Modell welches wir nutzen möchten. ```py >>> import torch >>> from transformers import pipeline >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") ``` Als nÀchstes laden wir den Datensatz (siehe 🀗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart) fÃŒr mehr Details) welches wir nutzen möchten. Zum Beispiel laden wir den [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) Datensatz: ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT ``` Wir mÃŒssen sicherstellen, dass die Abtastrate des Datensatzes der Abtastrate entspricht, mit der `facebook/wav2vec2-base-960h` trainiert wurde. ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate)) ``` Audiodateien werden automatisch geladen und neu abgetastet, wenn die Spalte "audio" aufgerufen wird. Extrahieren wir die rohen Wellenform-Arrays der ersten 4 Beispiele und ÃŒbergeben wir sie als Liste an die Pipeline: ```py >>> result = speech_recognizer(dataset[:4]["audio"]) >>> print([d["text"] for d in result]) ['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FODING HOW I'D SET UP A JOIN TO HET WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE AP SO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AND I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I THURN A JOIN A COUNT'] ``` Bei einem größeren Datensatz mit vielen Eingaben (wie bei Sprache oder Bildverarbeitung) sollten Sie einen Generator anstelle einer Liste ÃŒbergeben, der alle Eingaben in den Speicher lÀdt. Weitere Informationen finden Sie in der [Pipeline-Dokumentation](./main_classes/pipelines). ### Ein anderes Modell und einen anderen Tokenizer in der Pipeline verwenden Die [`pipeline`] kann jedes Modell aus dem [Model Hub](https://huggingface.co/models) verwenden, wodurch es einfach ist, die [`pipeline`] fÃŒr andere AnwendungsfÀlle anzupassen. Wenn Sie beispielsweise ein Modell wÃŒnschen, das französischen Text verarbeiten kann, verwenden Sie die Tags im Model Hub, um nach einem geeigneten Modell zu filtern. 
Das oberste gefilterte Ergebnis liefert ein mehrsprachiges [BERT-Modell](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment), das auf die Stimmungsanalyse abgestimmt ist. Großartig, verwenden wir dieses Modell! ```py >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" ``` <frameworkcontent> <pt> Use the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and it's associated tokenizer (more on an `AutoClass` below): ```py >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </pt> <tf> Use the [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and it's associated tokenizer (more on an `TFAutoClass` below): ```py >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </tf> </frameworkcontent> Dann können Sie das Modell und den Tokenizer in der [`pipeline`] angeben und den `Klassifikator` auf Ihren Zieltext anwenden: ```py >>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) >>> classifier("Nous sommes trÚs heureux de vous présenter la bibliothÚque 🀗 Transformers.") [{'label': '5 stars', 'score': 0.7273}] ``` Wenn Sie kein Modell fÃŒr Ihren Anwendungsfall finden können, mÃŒssen Sie ein vortrainiertes Modell auf Ihren Daten feinabstimmen. Schauen Sie sich unser [Feinabstimmungs-Tutorial](./training) an, um zu erfahren, wie das geht. Und schließlich, nachdem Sie Ihr trainiertes Modell verfeinert haben, sollten Sie es mit der Community im Model Hub teilen (siehe Tutorial [hier](./model_sharing)), um NLP fÃŒr alle zu demokratisieren! 🀗 ## AutoClass <Youtube id="AhChOFRegn4"/> Unter der Haube arbeiten die Klassen [`AutoModelForSequenceClassification`] und [`AutoTokenizer`] zusammen, um die [`pipeline`] zu betreiben. Eine [`AutoClass`](./model_doc/auto) ist eine AbkÃŒrzung, die automatisch die Architektur eines trainierten Modells aus dessen Namen oder Pfad abruft. Sie mÃŒssen nur die passende `AutoClass` fÃŒr Ihre Aufgabe und den zugehörigen Tokenizer mit [`AutoTokenizer`] auswÀhlen. Kehren wir zu unserem Beispiel zurÃŒck und sehen wir uns an, wie Sie die `AutoClass` verwenden können, um die Ergebnisse der [`pipeline`] zu replizieren. ### AutoTokenizer Ein Tokenizer ist fÃŒr die Vorverarbeitung von Text in ein fÃŒr das Modell verstÀndliches Format zustÀndig. ZunÀchst zerlegt der Tokenisierer den Text in Wörter, die *Token* genannt werden. Es gibt mehrere Regeln fÃŒr den Tokenisierungsprozess, z. B. wie und auf welcher Ebene ein Wort aufgespalten wird (weitere Informationen ÃŒber Tokenisierung [hier](./tokenizer_summary)). Das Wichtigste ist jedoch, dass Sie den Tokenizer mit demselben Modellnamen instanziieren mÃŒssen, um sicherzustellen, dass Sie dieselben Tokenisierungsregeln verwenden, mit denen ein Modell zuvor trainiert wurde. Laden sie einen Tokenizer mit [`AutoTokenizer`]: ```py >>> from transformers import AutoTokenizer >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` Anschließend wandelt der Tokenizer die Token in Zahlen um, um einen Tensor als Eingabe fÃŒr das Modell zu konstruieren. Dieser wird als *Vokabular* des Modells bezeichnet. 
Übergeben Sie Ihren Text an den Tokenizer: ```py >>> encoding = tokenizer("We are very happy to show you the 🀗 Transformers library.") >>> print(encoding) {'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` Der Tokenizer gibt ein Wörterbuch zurÃŒck, das Folgendes enthÀlt: * [input_ids](./glossary#input-ids): numerische ReprÀsentationen Ihrer Token. * [atttention_mask](.glossary#attention-mask): gibt an, welche Token beachtet werden sollen. Genau wie die [`pipeline`] akzeptiert der Tokenizer eine Liste von Eingaben. DarÃŒber hinaus kann der Tokenizer den Text auch auffÃŒllen und kÃŒrzen, um einen Stapel mit einheitlicher LÀnge zurÃŒckzugeben: <frameworkcontent> <pt> ```py >>> pt_batch = tokenizer( ... ["We are very happy to show you the 🀗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="pt", ... ) ``` </pt> <tf> ```py >>> tf_batch = tokenizer( ... ["We are very happy to show you the 🀗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="tf", ... ) ``` </tf> </frameworkcontent> Lesen Sie das Tutorial [preprocessing](./preprocessing) fÃŒr weitere Details zur Tokenisierung. ### AutoModel <frameworkcontent> <pt> 🀗 Transformers bietet eine einfache und einheitliche Möglichkeit, vortrainierte Instanzen zu laden. Das bedeutet, dass Sie ein [`AutoModel`] laden können, wie Sie einen [`AutoTokenizer`] laden wÃŒrden. Der einzige Unterschied ist die Auswahl des richtigen [`AutoModel`] fÃŒr die Aufgabe. Da Sie eine Text- oder Sequenzklassifizierung vornehmen, laden Sie [`AutoModelForSequenceClassification`]: ```py >>> from transformers import AutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> In der [Aufgabenzusammenfassung](./task_summary) steht, welche [AutoModel]-Klasse fÃŒr welche Aufgabe zu verwenden ist. </Tip> Jetzt können Sie Ihren vorverarbeiteten Stapel von Eingaben direkt an das Modell ÃŒbergeben. Sie mÃŒssen nur das Wörterbuch entpacken, indem Sie `**` hinzufÃŒgen: ```py >>> pt_outputs = pt_model(**pt_batch) ``` Das Modell gibt die endgÃŒltigen Aktivierungen in dem Attribut "logits" aus. Wenden Sie die Softmax-Funktion auf die "logits" an, um die Wahrscheinlichkeiten zu erhalten: ```py >>> from torch import nn >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) >>> print(pt_predictions) tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>) ``` </pt> <tf> 🀗 Transformers bietet eine einfache und einheitliche Methode zum Laden von vortrainierten Instanzen. Das bedeutet, dass Sie ein [`TFAutoModel`] genauso laden können, wie Sie einen [`AutoTokenizer`] laden wÃŒrden. Der einzige Unterschied ist die Auswahl des richtigen [`TFAutoModel`] fÃŒr die Aufgabe. 
Da Sie Text - oder Sequenz - Klassifizierung machen, laden Sie [`TFAutoModelForSequenceClassification`]: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> In der [Aufgabenzusammenfassung](./task_summary) steht, welche [AutoModel]-Klasse fÃŒr welche Aufgabe zu verwenden ist. </Tip> Jetzt können Sie Ihren vorverarbeiteten Stapel von Eingaben direkt an das Modell ÃŒbergeben, indem Sie die WörterbuchschlÃŒssel direkt an die Tensoren ÃŒbergeben: ```py >>> tf_outputs = tf_model(tf_batch) ``` Das Modell gibt die endgÃŒltigen Aktivierungen in dem Attribut "logits" aus. Wenden Sie die Softmax-Funktion auf die "logits" an, um die Wahrscheinlichkeiten zu erhalten: ```py >>> import tensorflow as tf >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) >>> tf_predictions # doctest: +IGNORE_RESULT ``` </tf> </frameworkcontent> <Tip> Alle 🀗 Transformers-Modelle (PyTorch oder TensorFlow) geben die Tensoren *vor* der endgÃŒltigen Aktivierungsfunktion Funktion (wie Softmax) aus, da die endgÃŒltige Aktivierungsfunktion oft mit dem Verlusten verschmolzen ist. </Tip> Modelle sind ein standardmÀßiges [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) oder ein [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model), sodass Sie sie in Ihrer ÃŒblichen Trainingsschleife verwenden können. Um jedoch die Dinge einfacher zu machen, bietet 🀗 Transformers eine [`Trainer`]-Klasse fÃŒr PyTorch, die FunktionalitÀt fÃŒr verteiltes Training, gemischte PrÀzision und mehr bietet. FÃŒr TensorFlow können Sie die Methode `fit` aus [Keras](https://keras.io/) verwenden. Siehe das [training tutorial](./training) fÃŒr weitere Details. <Tip> Transformers-Modellausgaben sind spezielle Datenklassen, so dass ihre Attribute in einer IDE automatisch vervollstÀndigt werden. Die ModellausgÀnge verhalten sich auch wie ein Tupel oder ein Wörterbuch (z.B. können Sie mit einem Integer, einem Slice oder einem String indexieren), wobei die Attribute, die "None" sind, ignoriert werden. </Tip> ### Modell speichern <frameworkcontent> <pt> Sobald Ihr Modell feinabgestimmt ist, können Sie es mit seinem Tokenizer speichern, indem Sie [`PreTrainedModel.save_pretrained`] verwenden: ```py >>> pt_save_directory = "./pt_save_pretrained" >>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT >>> pt_model.save_pretrained(pt_save_directory) ``` Wenn Sie bereit sind, das Modell erneut zu verwenden, laden Sie es mit [`PreTrainedModel.from_pretrained`]: ```py >>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained") ``` </pt> <tf> Sobald Ihr Modell feinabgestimmt ist, können Sie es mit seinem Tokenizer unter Verwendung von [`TFPreTrainedModel.save_pretrained`] speichern: ```py >>> tf_save_directory = "./tf_save_pretrained" >>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT >>> tf_model.save_pretrained(tf_save_directory) ``` Wenn Sie bereit sind, das Modell wieder zu verwenden, laden Sie es mit [`TFPreTrainedModel.from_pretrained`]: ```py >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained") ``` </tf> </frameworkcontent> Ein besonders cooles 🀗 Transformers-Feature ist die Möglichkeit, ein Modell zu speichern und es entweder als PyTorch- oder TensorFlow-Modell wieder zu laden. 
Der Parameter "from_pt" oder "from_tf" kann das Modell von einem Framework in das andere konvertieren: <frameworkcontent> <pt> ```py >>> from transformers import AutoModel >>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) >>> pt_model = AutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True) ``` </pt> <tf> ```py >>> from transformers import TFAutoModel >>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True) ``` </tf> </frameworkcontent> ## Custom model builds Sie können die Konfigurationsklasse des Modells Àndern, um zu bestimmen, wie ein Modell aufgebaut ist. Die Konfiguration legt die Attribute eines Modells fest, z. B. die Anzahl der verborgenen Schichten oder der Aufmerksamkeitsköpfe. Wenn Sie ein Modell aus einer benutzerdefinierten Konfigurationsklasse initialisieren, beginnen Sie bei Null. Die Modellattribute werden zufÀllig initialisiert, und Sie mÃŒssen das Modell trainieren, bevor Sie es verwenden können, um aussagekrÀftige Ergebnisse zu erhalten. Beginnen Sie mit dem Import von [`AutoConfig`] und laden Sie dann das trainierte Modell, das Sie Àndern möchten. Innerhalb von [`AutoConfig.from_pretrained`] können Sie das Attribut angeben, das Sie Àndern möchten, z. B. die Anzahl der Aufmerksamkeitsköpfe: ```py >>> from transformers import AutoConfig >>> my_config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased", n_heads=12) ``` <frameworkcontent> <pt> Create a model from your custom configuration with [`AutoModel.from_config`]: ```py >>> from transformers import AutoModel >>> my_model = AutoModel.from_config(my_config) ``` </pt> <tf> Create a model from your custom configuration with [`TFAutoModel.from_config`]: ```py >>> from transformers import TFAutoModel >>> my_model = TFAutoModel.from_config(my_config) ``` </tf> </frameworkcontent> Weitere Informationen zur Erstellung von benutzerdefinierten Konfigurationen finden Sie in der Anleitung [Erstellen einer benutzerdefinierten Architektur](./create_a_model). ## Wie geht es weiter? Nachdem Sie nun die 🀗 Transformers-Kurztour abgeschlossen haben, schauen Sie sich unsere Anleitungen an und erfahren Sie, wie Sie spezifischere Dinge tun können, wie das Schreiben eines benutzerdefinierten Modells, die Feinabstimmung eines Modells fÃŒr eine Aufgabe und wie man ein Modell mit einem Skript trainiert. Wenn Sie mehr ÃŒber die Kernkonzepte von 🀗 Transformers erfahren möchten, nehmen Sie sich eine Tasse Kaffee und werfen Sie einen Blick auf unsere konzeptionellen LeitfÀden!
transformers/docs/source/de/quicktour.md/0
{ "file_path": "transformers/docs/source/de/quicktour.md", "repo_id": "transformers", "token_count": 7324 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Instantiate a big model A barrier to accessing very large pretrained models is the amount of memory required. When loading a pretrained PyTorch model, you usually: 1. Create a model with random weights. 2. Load your pretrained weights. 3. Put those pretrained weights in the model. The first two steps both require a full version of the model in memory and if the model weighs several GBs, you may not have enough memory for two copies of it. This problem is amplified in distributed training environments because each process loads a pretrained model and stores two copies in memory. > [!TIP] > The randomly created model is initialized with "empty" tensors, which take space in memory without filling it. The random values are whatever was in this chunk of memory at the time. To improve loading speed, the [`_fast_init`](https://github.com/huggingface/transformers/blob/c9f6e5e35156e068b227dd9b15521767f6afd4d2/src/transformers/modeling_utils.py#L2710) parameter is set to `True` by default to skip the random initialization for all weights that are correctly loaded. This guide will show you how Transformers can help you load large pretrained models despite their memory requirements. ## Sharded checkpoints From Transformers v4.18.0, a checkpoint larger than 10GB is automatically sharded by the [`~PreTrainedModel.save_pretrained`] method. It is split into several smaller partial checkpoints and creates an index file that maps parameter names to the files they're stored in. The maximum shard size is controlled with the `max_shard_size` parameter, but by default it is 5GB, because it is easier to run on free-tier GPU instances without running out of memory. For example, let's shard [BioMistral/BioMistral-7B](https://hf.co/BioMistral/BioMistral-7B). ```py >>> with tempfile.TemporaryDirectory() as tmp_dir: ... model.save_pretrained(tmp_dir, max_shard_size="5GB") ... print(sorted(os.listdir(tmp_dir))) ['config.json', 'generation_config.json', 'model-00001-of-00006.safetensors', 'model-00002-of-00006.safetensors', 'model-00003-of-00006.safetensors', 'model-00004-of-00006.safetensors', 'model-00005-of-00006.safetensors', 'model-00006-of-00006.safetensors', 'model.safetensors.index.json'] ``` The sharded checkpoint is reloaded with the [`~PreTrainedModel.from_pretrained`] method. ```py >>> with tempfile.TemporaryDirectory() as tmp_dir: ... model.save_pretrained(tmp_dir, max_shard_size="5GB") ... new_model = AutoModel.from_pretrained(tmp_dir) ``` The main advantage of sharded checkpoints for big models is that each shard is loaded after the previous one, which caps the memory usage to only the model size and the largest shard size. 
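The snippets above assume that `model`, `tempfile`, and `os` were already defined earlier. For reference, a self-contained sketch of the same sharding round-trip is shown below; it uses a small checkpoint and a deliberately low `max_shard_size` purely so it is quick to run — the mechanics are identical for multi-GB models.

```py
import os
import tempfile

from transformers import AutoModel

# any checkpoint works; a small model with a low max_shard_size
# still produces several shards plus an index file
model = AutoModel.from_pretrained("openai-community/gpt2")

with tempfile.TemporaryDirectory() as tmp_dir:
    model.save_pretrained(tmp_dir, max_shard_size="200MB")
    print(sorted(os.listdir(tmp_dir)))  # config.json, model-0000x-of-0000y.safetensors, index file, ...

    # the sharded checkpoint reloads transparently
    new_model = AutoModel.from_pretrained(tmp_dir)
```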
You could also directly load a sharded checkpoint inside a model without the [`~PreTrainedModel.from_pretrained`] method (similar to PyTorch's `load_state_dict()` method for a full checkpoint). In this case, use the [`~modeling_utils.load_sharded_checkpoint`] method. ```py >>> from transformers.modeling_utils import load_sharded_checkpoint >>> with tempfile.TemporaryDirectory() as tmp_dir: ... model.save_pretrained(tmp_dir, max_shard_size="5GB") ... load_sharded_checkpoint(model, tmp_dir) ``` ### Shard metadata The index file determines which keys are in the checkpoint and where the corresponding weights are stored. This file is loaded like any other JSON file and you can get a dictionary from it. ```py >>> import json >>> with tempfile.TemporaryDirectory() as tmp_dir: ... model.save_pretrained(tmp_dir, max_shard_size="5GB") ... with open(os.path.join(tmp_dir, "model.safetensors.index.json"), "r") as f: ... index = json.load(f) >>> print(index.keys()) dict_keys(['metadata', 'weight_map']) ``` The `metadata` key provides the total model size. ```py >>> index["metadata"] {'total_size': 28966928384} ``` The `weight_map` key maps each parameter name (typically `state_dict` in a PyTorch model) to the shard it's stored in. ```py >>> index["weight_map"] {'lm_head.weight': 'model-00006-of-00006.safetensors', 'model.embed_tokens.weight': 'model-00001-of-00006.safetensors', 'model.layers.0.input_layernorm.weight': 'model-00001-of-00006.safetensors', 'model.layers.0.mlp.down_proj.weight': 'model-00001-of-00006.safetensors', ... } ``` ## Accelerate's Big Model Inference > [!TIP] > Make sure you have Accelerate v0.9.0 or later and PyTorch v1.9.0 or later installed. From Transformers v4.20.0, the [`~PreTrainedModel.from_pretrained`] method is supercharged with Accelerate's [Big Model Inference](https://hf.co/docs/accelerate/usage_guides/big_modeling) feature to efficiently handle really big models! Big Model Inference creates a *model skeleton* on PyTorch's [**meta**](https://pytorch.org/docs/main/meta.html) device. The randomly initialized parameters are only created when the pretrained weights are loaded. This way, you aren't keeping two copies of the model in memory at the same time (one for the randomly initialized model and one for the pretrained weights), and the maximum memory consumed is only the full model size. To enable Big Model Inference in Transformers, set `low_cpu_mem_usage=True` in the [`~PreTrainedModel.from_pretrained`] method. ```py from transformers import AutoModelForCausalLM gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", low_cpu_mem_usage=True) ``` Accelerate automatically dispatches the model weights across all available devices, starting with the fastest device (GPU) first and then offloading to the slower devices (CPU and even hard drive). This is enabled by setting `device_map="auto"` in the [`~PreTrainedModel.from_pretrained`] method. When you pass the `device_map` parameter, `low_cpu_mem_usage` is automatically set to `True` so you don't need to specify it. ```py from transformers import AutoModelForCausalLM # these loading methods are equivalent gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map="auto") gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map="auto", low_cpu_mem_usage=True) ``` You can also write your own `device_map` by mapping each layer to a device. 
It should map all model parameters to a device, but you don't have to detail where all the submodules of a layer go if the entire layer is on the same device.

```python
device_map = {"model.layers.1": 0, "model.layers.14": 1, "model.layers.31": "cpu", "lm_head": "disk"}
```

Access the `hf_device_map` attribute to see how Accelerate split the model across devices.

```py
gemma.hf_device_map
```

```python out
{'model.embed_tokens': 0,
 'model.layers.0': 0,
 'model.layers.1': 0,
 'model.layers.2': 0,
 'model.layers.3': 0,
 'model.layers.4': 0,
 'model.layers.5': 0,
 'model.layers.6': 0,
 'model.layers.7': 0,
 'model.layers.8': 0,
 'model.layers.9': 0,
 'model.layers.10': 0,
 'model.layers.11': 0,
 'model.layers.12': 0,
 'model.layers.13': 0,
 'model.layers.14': 'cpu',
 'model.layers.15': 'cpu',
 'model.layers.16': 'cpu',
 'model.layers.17': 'cpu',
 'model.layers.18': 'cpu',
 'model.layers.19': 'cpu',
 'model.layers.20': 'cpu',
 'model.layers.21': 'cpu',
 'model.layers.22': 'cpu',
 'model.layers.23': 'cpu',
 'model.layers.24': 'cpu',
 'model.layers.25': 'cpu',
 'model.layers.26': 'cpu',
 'model.layers.27': 'cpu',
 'model.layers.28': 'cpu',
 'model.layers.29': 'cpu',
 'model.layers.30': 'cpu',
 'model.layers.31': 'cpu',
 'model.norm': 'cpu',
 'lm_head': 'cpu'}
```

## Model data type

PyTorch model weights are normally instantiated as torch.float32, which can be an issue if you try to load a model as a different data type. For example, you'd need twice as much memory to load the weights in torch.float32 and then again to load them in your desired data type, like torch.float16.

> [!WARNING]
> Due to how PyTorch is designed, the `torch_dtype` parameter only supports floating data types.

To avoid wasting memory like this, explicitly set the `torch_dtype` parameter to the desired data type or set `torch_dtype="auto"` to load the weights with the most optimal memory pattern (the data type is automatically derived from the model weights).

<hfoptions id="dtype">
<hfoption id="specific dtype">

```py
import torch
from transformers import AutoModelForCausalLM

gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", torch_dtype=torch.float16)
```

</hfoption>
<hfoption id="auto dtype">

```py
from transformers import AutoModelForCausalLM

gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", torch_dtype="auto")
```

</hfoption>
</hfoptions>

You can also set the data type to use for models instantiated from scratch.

```python
import torch
from transformers import AutoConfig, AutoModel

my_config = AutoConfig.from_pretrained("google/gemma-2b", torch_dtype=torch.float16)

model = AutoModel.from_config(my_config)
```
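In practice, the options above are often combined: Big Model Inference dispatches the layers while `torch_dtype="auto"` keeps the weights in their saved precision. A minimal sketch, assuming you have access to the gated `google/gemma-7b` weights and have Accelerate installed:

```py
from transformers import AutoModelForCausalLM

gemma = AutoModelForCausalLM.from_pretrained(
    "google/gemma-7b",
    device_map="auto",   # dispatch layers across the available devices (requires Accelerate)
    torch_dtype="auto",  # load the weights in the dtype they were saved in
)

print(gemma.hf_device_map)  # where each layer ended up
print(gemma.dtype)          # dtype of the loaded parameters
```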
transformers/docs/source/en/big_models.md/0
{ "file_path": "transformers/docs/source/en/big_models.md", "repo_id": "transformers", "token_count": 3022 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Backbone A backbone is a model used for feature extraction for higher level computer vision tasks such as object detection and image classification. Transformers provides an [`AutoBackbone`] class for initializing a Transformers backbone from pretrained model weights, and two utility classes: * [`~utils.BackboneMixin`] enables initializing a backbone from Transformers or [timm](https://hf.co/docs/timm/index) and includes functions for returning the output features and indices. * [`~utils.BackboneConfigMixin`] sets the output features and indices of the backbone configuration. [timm](https://hf.co/docs/timm/index) models are loaded with the [`TimmBackbone`] and [`TimmBackboneConfig`] classes. Backbones are supported for the following models: * [BEiT](../model_doc/beit) * [BiT](../model_doc/bit) * [ConvNext](../model_doc/convnext) * [ConvNextV2](../model_doc/convnextv2) * [DiNAT](../model_doc/dinat) * [DINOV2](../model_doc/dinov2) * [FocalNet](../model_doc/focalnet) * [MaskFormer](../model_doc/maskformer) * [NAT](../model_doc/nat) * [ResNet](../model_doc/resnet) * [Swin Transformer](../model_doc/swin) * [Swin Transformer v2](../model_doc/swinv2) * [ViTDet](../model_doc/vitdet) ## AutoBackbone [[autodoc]] AutoBackbone ## BackboneMixin [[autodoc]] utils.BackboneMixin ## BackboneConfigMixin [[autodoc]] utils.BackboneConfigMixin ## TimmBackbone [[autodoc]] models.timm_backbone.TimmBackbone ## TimmBackboneConfig [[autodoc]] models.timm_backbone.TimmBackboneConfig
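As a quick illustration of how these classes fit together, the sketch below loads a ResNet checkpoint through [`AutoBackbone`] and inspects the returned feature maps; the checkpoint name and `out_indices` are only examples and can be swapped for any supported backbone.

```py
import torch

from transformers import AutoBackbone

# load a pretrained ResNet as a backbone and request the feature maps of stages 1-4
backbone = AutoBackbone.from_pretrained("microsoft/resnet-50", out_indices=[1, 2, 3, 4])

# a dummy batch with one 224x224 RGB image
pixel_values = torch.randn(1, 3, 224, 224)

with torch.no_grad():
    outputs = backbone(pixel_values)

# one feature map per requested stage, each of shape (batch, channels, height, width)
for index, feature_map in zip(backbone.out_indices, outputs.feature_maps):
    print(index, feature_map.shape)
```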
transformers/docs/source/en/main_classes/backbones.md/0
{ "file_path": "transformers/docs/source/en/main_classes/backbones.md", "repo_id": "transformers", "token_count": 689 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BEiT ## Overview The BEiT model was proposed in [BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) by Hangbo Bao, Li Dong and Furu Wei. Inspired by BERT, BEiT is the first paper that makes self-supervised pre-training of Vision Transformers (ViTs) outperform supervised pre-training. Rather than pre-training the model to predict the class of an image (as done in the [original ViT paper](https://arxiv.org/abs/2010.11929)), BEiT models are pre-trained to predict visual tokens from the codebook of OpenAI's [DALL-E model](https://arxiv.org/abs/2102.12092) given masked patches. The abstract from the paper is the following: *We introduce a self-supervised vision representation model BEiT, which stands for Bidirectional Encoder representation from Image Transformers. Following BERT developed in the natural language processing area, we propose a masked image modeling task to pretrain vision Transformers. Specifically, each image has two views in our pre-training, i.e, image patches (such as 16x16 pixels), and visual tokens (i.e., discrete tokens). We first "tokenize" the original image into visual tokens. Then we randomly mask some image patches and fed them into the backbone Transformer. The pre-training objective is to recover the original visual tokens based on the corrupted image patches. After pre-training BEiT, we directly fine-tune the model parameters on downstream tasks by appending task layers upon the pretrained encoder. Experimental results on image classification and semantic segmentation show that our model achieves competitive results with previous pre-training methods. For example, base-size BEiT achieves 83.2% top-1 accuracy on ImageNet-1K, significantly outperforming from-scratch DeiT training (81.8%) with the same setup. Moreover, large-size BEiT obtains 86.3% only using ImageNet-1K, even outperforming ViT-L with supervised pre-training on ImageNet-22K (85.2%).* This model was contributed by [nielsr](https://huggingface.co/nielsr). The JAX/FLAX version of this model was contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/beit). ## Usage tips - BEiT models are regular Vision Transformers, but pre-trained in a self-supervised way rather than supervised. They outperform both the [original model (ViT)](vit) as well as [Data-efficient Image Transformers (DeiT)](deit) when fine-tuned on ImageNet-1K and CIFAR-100. 
You can check out demo notebooks regarding inference as well as fine-tuning on custom data [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer) (you can just replace [`ViTFeatureExtractor`] by [`BeitImageProcessor`] and [`ViTForImageClassification`] by [`BeitForImageClassification`]). - There's also a demo notebook available which showcases how to combine DALL-E's image tokenizer with BEiT for performing masked image modeling. You can find it [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/BEiT). - As the BEiT models expect each image to be of the same size (resolution), one can use [`BeitImageProcessor`] to resize (or rescale) and normalize images for the model. - Both the patch resolution and image resolution used during pre-training or fine-tuning are reflected in the name of each checkpoint. For example, `microsoft/beit-base-patch16-224` refers to a base-sized architecture with patch resolution of 16x16 and fine-tuning resolution of 224x224. All checkpoints can be found on the [hub](https://huggingface.co/models?search=microsoft/beit). - The available checkpoints are either (1) pre-trained on [ImageNet-22k](http://www.image-net.org/) (a collection of 14 million images and 22k classes) only, (2) also fine-tuned on ImageNet-22k or (3) also fine-tuned on [ImageNet-1k](http://www.image-net.org/challenges/LSVRC/2012/) (also referred to as ILSVRC 2012, a collection of 1.3 million images and 1,000 classes). - BEiT uses relative position embeddings, inspired by the T5 model. During pre-training, the authors shared the relative position bias among the several self-attention layers. During fine-tuning, each layer's relative position bias is initialized with the shared relative position bias obtained after pre-training. Note that, if one wants to pre-train a model from scratch, one needs to either set the `use_relative_position_bias` or the `use_relative_position_bias` attribute of [`BeitConfig`] to `True` in order to add position embeddings. <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/beit_architecture.jpg" alt="drawing" width="600"/> <small> BEiT pre-training. Taken from the <a href="https://arxiv.org/abs/2106.08254">original paper.</a> </small> ### Using Scaled Dot Product Attention (SDPA) PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the [official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention) page for more information. SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set `attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used. ``` from transformers import BeitForImageClassification model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224", attn_implementation="sdpa", torch_dtype=torch.float16) ... ``` For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`). 
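As a minimal end-to-end sketch of these recommendations (SDPA plus half-precision), the example below runs image classification with a BEiT checkpoint; the checkpoint and test image are the usual documentation placeholders and can be swapped freely.

```py
import requests
import torch
from PIL import Image

from transformers import BeitForImageClassification, BeitImageProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32  # half-precision is most useful on GPU

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained(
    "microsoft/beit-base-patch16-224",
    attn_implementation="sdpa",
    torch_dtype=dtype,
).to(device)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt").to(device, dtype)

with torch.no_grad():
    logits = model(**inputs).logits

# map the highest-scoring class index back to its label
print(model.config.id2label[logits.argmax(-1).item()])
```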
On a local benchmark (NVIDIA GeForce RTX 2060-8GB, PyTorch 2.5.1, OS Ubuntu 20.04) with `float16` and `microsoft/beit-base-patch16-224` model, we saw the following improvements during training and inference: #### Training | num_training_steps | batch_size | image_size | is_cuda | Time per batch (eager - s) | Time per batch (sdpa - s) | Speedup (%) | Eager peak mem (MB) | SDPA peak mem (MB) | Mem saving (%) | |--------------------|------------|--------------|---------|----------------------------|---------------------------|-------------|----------------------|--------------------|----------------| | 50 | 2 | (1048, 640) | True | 0.984 | 0.746 | 31.975 | 6738.915 | 4319.886 | 55.998 | #### Inference | Image batch size | Eager (s/iter) | Eager CI, % | Eager memory (MB) | SDPA (s/iter) | SDPA CI, % | SDPA memory (MB) | SDPA speedup | SDPA memory saved (%) | |-------------------:|-----------------:|:--------------|--------------------:|----------------:|:-------------|-------------------:|---------------:|----------------------:| | 1 | 0.012 | ±0.3% | 3.76657e+08 | 0.011 | ±0.5% | 3.75739e+08 | 1.05 | 0.244 | | 4 | 0.013 | ±0.1% | 4.03147e+08 | 0.011 | ±0.2% | 3.90554e+08 | 1.178 | 3.225 | | 16 | 0.045 | ±0.1% | 4.96697e+08 | 0.035 | ±0.1% | 4.51232e+08 | 1.304 | 10.076 | | 32 | 0.088 | ±0.1% | 6.24417e+08 | 0.066 | ±0.1% | 5.33488e+08 | 1.325 | 17.044 | ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with BEiT. <PipelineTag pipeline="image-classification"/> - [`BeitForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) **Semantic segmentation** - [Semantic segmentation task guide](../tasks/semantic_segmentation) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## BEiT specific outputs [[autodoc]] models.beit.modeling_beit.BeitModelOutputWithPooling [[autodoc]] models.beit.modeling_flax_beit.FlaxBeitModelOutputWithPooling ## BeitConfig [[autodoc]] BeitConfig ## BeitFeatureExtractor [[autodoc]] BeitFeatureExtractor - __call__ - post_process_semantic_segmentation ## BeitImageProcessor [[autodoc]] BeitImageProcessor - preprocess - post_process_semantic_segmentation <frameworkcontent> <pt> ## BeitModel [[autodoc]] BeitModel - forward ## BeitForMaskedImageModeling [[autodoc]] BeitForMaskedImageModeling - forward ## BeitForImageClassification [[autodoc]] BeitForImageClassification - forward ## BeitForSemanticSegmentation [[autodoc]] BeitForSemanticSegmentation - forward </pt> <jax> ## FlaxBeitModel [[autodoc]] FlaxBeitModel - __call__ ## FlaxBeitForMaskedImageModeling [[autodoc]] FlaxBeitForMaskedImageModeling - __call__ ## FlaxBeitForImageClassification [[autodoc]] FlaxBeitForImageClassification - __call__ </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/beit.md/0
{ "file_path": "transformers/docs/source/en/model_doc/beit.md", "repo_id": "transformers", "token_count": 3501 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ConvBERT <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=convbert"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-convbert-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/conv-bert-base"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The ConvBERT model was proposed in [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan. The abstract from the paper is the following: *Pre-trained language models like BERT and its variants have recently achieved impressive performance in various natural language understanding tasks. However, BERT heavily relies on the global self-attention block and thus suffers large memory footprint and computation cost. Although all its attention heads query on the whole input sequence for generating the attention map from a global perspective, we observe some heads only need to learn local dependencies, which means the existence of computation redundancy. We therefore propose a novel span-based dynamic convolution to replace these self-attention heads to directly model local dependencies. The novel convolution heads, together with the rest self-attention heads, form a new mixed attention block that is more efficient at both global and local context learning. We equip BERT with this mixed attention design and build a ConvBERT model. Experiments have shown that ConvBERT significantly outperforms BERT and its variants in various downstream tasks, with lower training cost and fewer model parameters. Remarkably, ConvBERTbase model achieves 86.4 GLUE score, 0.7 higher than ELECTRAbase, while using less than 1/4 training cost. Code and pre-trained models will be released.* This model was contributed by [abhishek](https://huggingface.co/abhishek). The original implementation can be found here: https://github.com/yitu-opensource/ConvBert ## Usage tips ConvBERT training tips are similar to those of BERT. For usage tips refer to [BERT documentation](bert). 
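For illustration, below is a minimal sketch of extracting contextual token representations with ConvBERT. The `YituTech/conv-bert-base` checkpoint name is an assumption of this sketch; any ConvBERT checkpoint from the Hub can be substituted.

```python
import torch
from transformers import AutoTokenizer, ConvBertModel

# Checkpoint name assumed for this sketch; replace it with the ConvBERT checkpoint you use.
tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = ConvBertModel.from_pretrained("YituTech/conv-bert-base")

inputs = tokenizer("ConvBERT mixes self-attention with span-based dynamic convolutions.", return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# One hidden-state vector per input token, just like a BERT encoder.
print(outputs.last_hidden_state.shape)
```

For downstream tasks, swap `ConvBertModel` for the corresponding task head (e.g. [`ConvBertForSequenceClassification`]), exactly as you would with BERT.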
## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## ConvBertConfig [[autodoc]] ConvBertConfig ## ConvBertTokenizer [[autodoc]] ConvBertTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## ConvBertTokenizerFast [[autodoc]] ConvBertTokenizerFast <frameworkcontent> <pt> ## ConvBertModel [[autodoc]] ConvBertModel - forward ## ConvBertForMaskedLM [[autodoc]] ConvBertForMaskedLM - forward ## ConvBertForSequenceClassification [[autodoc]] ConvBertForSequenceClassification - forward ## ConvBertForMultipleChoice [[autodoc]] ConvBertForMultipleChoice - forward ## ConvBertForTokenClassification [[autodoc]] ConvBertForTokenClassification - forward ## ConvBertForQuestionAnswering [[autodoc]] ConvBertForQuestionAnswering - forward </pt> <tf> ## TFConvBertModel [[autodoc]] TFConvBertModel - call ## TFConvBertForMaskedLM [[autodoc]] TFConvBertForMaskedLM - call ## TFConvBertForSequenceClassification [[autodoc]] TFConvBertForSequenceClassification - call ## TFConvBertForMultipleChoice [[autodoc]] TFConvBertForMultipleChoice - call ## TFConvBertForTokenClassification [[autodoc]] TFConvBertForTokenClassification - call ## TFConvBertForQuestionAnswering [[autodoc]] TFConvBertForQuestionAnswering - call </tf> </frameworkcontent>
transformers/docs/source/en/model_doc/convbert.md/0
{ "file_path": "transformers/docs/source/en/model_doc/convbert.md", "repo_id": "transformers", "token_count": 1393 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # EfficientNet ## Overview The EfficientNet model was proposed in [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan and Quoc V. Le. EfficientNets are a family of image classification models, which achieve state-of-the-art accuracy, yet being an order-of-magnitude smaller and faster than previous models. The abstract from the paper is the following: *Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are available. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on scaling up MobileNets and ResNet. To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3% top-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet. Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters.* This model was contributed by [adirik](https://huggingface.co/adirik). The original code can be found [here](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet). ## EfficientNetConfig [[autodoc]] EfficientNetConfig ## EfficientNetImageProcessor [[autodoc]] EfficientNetImageProcessor - preprocess ## EfficientNetModel [[autodoc]] EfficientNetModel - forward ## EfficientNetForImageClassification [[autodoc]] EfficientNetForImageClassification - forward
transformers/docs/source/en/model_doc/efficientnet.md/0
{ "file_path": "transformers/docs/source/en/model_doc/efficientnet.md", "repo_id": "transformers", "token_count": 725 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # FNet ## Overview The FNet model was proposed in [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. The model replaces the self-attention layer in a BERT model with a fourier transform which returns only the real parts of the transform. The model is significantly faster than the BERT model because it has fewer parameters and is more memory efficient. The model achieves about 92-97% accuracy of BERT counterparts on GLUE benchmark, and trains much faster than the BERT model. The abstract from the paper is the following: *We show that Transformer encoder architectures can be sped up, with limited accuracy costs, by replacing the self-attention sublayers with simple linear transformations that "mix" input tokens. These linear mixers, along with standard nonlinearities in feed-forward layers, prove competent at modeling semantic relationships in several text classification tasks. Most surprisingly, we find that replacing the self-attention sublayer in a Transformer encoder with a standard, unparameterized Fourier Transform achieves 92-97% of the accuracy of BERT counterparts on the GLUE benchmark, but trains 80% faster on GPUs and 70% faster on TPUs at standard 512 input lengths. At longer input lengths, our FNet model is significantly faster: when compared to the "efficient" Transformers on the Long Range Arena benchmark, FNet matches the accuracy of the most accurate models, while outpacing the fastest models across all sequence lengths on GPUs (and across relatively shorter lengths on TPUs). Finally, FNet has a light memory footprint and is particularly efficient at smaller model sizes; for a fixed speed and accuracy budget, small FNet models outperform Transformer counterparts.* This model was contributed by [gchhablani](https://huggingface.co/gchhablani). The original code can be found [here](https://github.com/google-research/google-research/tree/master/f_net). ## Usage tips The model was trained without an attention mask as it is based on Fourier Transform. The model was trained with maximum sequence length 512 which includes pad tokens. Hence, it is highly recommended to use the same maximum sequence length for fine-tuning and inference. 
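To make the fixed-length recommendation concrete, the sketch below pads inputs to the 512-token maximum before running the encoder. It assumes the `google/fnet-base` checkpoint and an installed `sentencepiece` package.

```python
import torch
from transformers import AutoTokenizer, FNetModel

# Checkpoint assumed for this sketch; the FNet tokenizer is SentencePiece-based.
tokenizer = AutoTokenizer.from_pretrained("google/fnet-base")
model = FNetModel.from_pretrained("google/fnet-base")

# Pad to the same maximum sequence length used during pre-training (512, including pad tokens),
# since FNet was trained without an attention mask.
inputs = tokenizer(
    "FNet mixes tokens with Fourier transforms instead of self-attention.",
    padding="max_length",
    truncation=True,
    max_length=512,
    return_tensors="pt",
)

with torch.no_grad():
    outputs = model(input_ids=inputs["input_ids"], token_type_ids=inputs["token_type_ids"])

print(outputs.last_hidden_state.shape)  # (1, 512, hidden_size)
```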
## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## FNetConfig [[autodoc]] FNetConfig ## FNetTokenizer [[autodoc]] FNetTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## FNetTokenizerFast [[autodoc]] FNetTokenizerFast ## FNetModel [[autodoc]] FNetModel - forward ## FNetForPreTraining [[autodoc]] FNetForPreTraining - forward ## FNetForMaskedLM [[autodoc]] FNetForMaskedLM - forward ## FNetForNextSentencePrediction [[autodoc]] FNetForNextSentencePrediction - forward ## FNetForSequenceClassification [[autodoc]] FNetForSequenceClassification - forward ## FNetForMultipleChoice [[autodoc]] FNetForMultipleChoice - forward ## FNetForTokenClassification [[autodoc]] FNetForTokenClassification - forward ## FNetForQuestionAnswering [[autodoc]] FNetForQuestionAnswering - forward
transformers/docs/source/en/model_doc/fnet.md/0
{ "file_path": "transformers/docs/source/en/model_doc/fnet.md", "repo_id": "transformers", "token_count": 1150 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # GPT-NeoX-Japanese ## Overview We introduce GPT-NeoX-Japanese, which is an autoregressive language model for Japanese, trained on top of [https://github.com/EleutherAI/gpt-neox](https://github.com/EleutherAI/gpt-neox). Japanese is a unique language with its large vocabulary and a combination of hiragana, katakana, and kanji writing scripts. To address this distinct structure of the Japanese language, we use a [special sub-word tokenizer](https://github.com/tanreinama/Japanese-BPEEncoder_V2). We are very grateful to *tanreinama* for open-sourcing this incredibly helpful tokenizer. Following the recommendations from Google's research on [PaLM](https://ai.googleblog.com/2022/04/pathways-language-model-palm-scaling-to.html), we have removed bias parameters from transformer blocks, achieving better model performance. Please refer [this article](https://medium.com/ml-abeja/training-a-better-gpt-2-93b157662ae4) in detail. Development of the model was led by [Shinya Otani](https://github.com/SO0529), [Takayoshi Makabe](https://github.com/spider-man-tm), [Anuj Arora](https://github.com/Anuj040), and [Kyo Hattori](https://github.com/go5paopao) from [ABEJA, Inc.](https://www.abejainc.com/). For more information on this model-building activity, please refer [here (ja)](https://tech-blog.abeja.asia/entry/abeja-gpt-project-202207). ### Usage example The `generate()` method can be used to generate text using GPT NeoX Japanese model. ```python >>> from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseTokenizer >>> model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b") >>> tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b") >>> prompt = "人ずAIが協調するためには、" >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids >>> gen_tokens = model.generate( ... input_ids, ... do_sample=True, ... temperature=0.9, ... max_length=100, ... ) >>> gen_text = tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)[0] >>> print(gen_text) 人ずAIが協調するためには、AIず人が共存し、AIを正しく理解する必芁がありたす。 ``` ## Resources - [Causal language modeling task guide](../tasks/language_modeling) ## GPTNeoXJapaneseConfig [[autodoc]] GPTNeoXJapaneseConfig ## GPTNeoXJapaneseTokenizer [[autodoc]] GPTNeoXJapaneseTokenizer ## GPTNeoXJapaneseModel [[autodoc]] GPTNeoXJapaneseModel - forward ## GPTNeoXJapaneseForCausalLM [[autodoc]] GPTNeoXJapaneseForCausalLM - forward
transformers/docs/source/en/model_doc/gpt_neox_japanese.md/0
{ "file_path": "transformers/docs/source/en/model_doc/gpt_neox_japanese.md", "repo_id": "transformers", "token_count": 1075 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LiLT ## Overview The LiLT model was proposed in [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding. LiLT allows to combine any pre-trained RoBERTa text encoder with a lightweight Layout Transformer, to enable [LayoutLM](layoutlm)-like document understanding for many languages. The abstract from the paper is the following: *Structured document understanding has attracted considerable attention and made significant progress recently, owing to its crucial role in intelligent document processing. However, most existing related models can only deal with the document data of specific language(s) (typically English) included in the pre-training collection, which is extremely limited. To address this issue, we propose a simple yet effective Language-independent Layout Transformer (LiLT) for structured document understanding. LiLT can be pre-trained on the structured documents of a single language and then directly fine-tuned on other languages with the corresponding off-the-shelf monolingual/multilingual pre-trained textual models. Experimental results on eight languages have shown that LiLT can achieve competitive or even superior performance on diverse widely-used downstream benchmarks, which enables language-independent benefit from the pre-training of document layout structure.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/lilt_architecture.jpg" alt="drawing" width="600"/> <small> LiLT architecture. Taken from the <a href="https://arxiv.org/abs/2202.13669">original paper</a>. </small> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/jpwang/lilt). ## Usage tips - To combine the Language-Independent Layout Transformer with a new RoBERTa checkpoint from the [hub](https://huggingface.co/models?search=roberta), refer to [this guide](https://github.com/jpWang/LiLT#or-generate-your-own-checkpoint-optional). The script will result in `config.json` and `pytorch_model.bin` files being stored locally. After doing this, one can do the following (assuming you're logged in with your HuggingFace account): ```python from transformers import LiltModel model = LiltModel.from_pretrained("path_to_your_files") model.push_to_hub("name_of_repo_on_the_hub") ``` - When preparing data for the model, make sure to use the token vocabulary that corresponds to the RoBERTa checkpoint you combined with the Layout Transformer. 
- As [lilt-roberta-en-base](https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base) uses the same vocabulary as [LayoutLMv3](layoutlmv3), one can use [`LayoutLMv3TokenizerFast`] to prepare data for the model. The same is true for [lilt-roberta-en-base](https://huggingface.co/SCUT-DLVCLab/lilt-infoxlm-base): one can use [`LayoutXLMTokenizerFast`] for that model. ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LiLT. - Demo notebooks for LiLT can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LiLT). **Documentation resources** - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## LiltConfig [[autodoc]] LiltConfig ## LiltModel [[autodoc]] LiltModel - forward ## LiltForSequenceClassification [[autodoc]] LiltForSequenceClassification - forward ## LiltForTokenClassification [[autodoc]] LiltForTokenClassification - forward ## LiltForQuestionAnswering [[autodoc]] LiltForQuestionAnswering - forward
transformers/docs/source/en/model_doc/lilt.md/0
{ "file_path": "transformers/docs/source/en/model_doc/lilt.md", "repo_id": "transformers", "token_count": 1291 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # MarianMT <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=marian"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-marian-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/opus-mt-zh-en"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview A framework for translation models, using the same models as BART. Translations should be similar, but not identical to output in the test set linked to in each model card. This model was contributed by [sshleifer](https://huggingface.co/sshleifer). ## Implementation Notes - Each model is about 298 MB on disk, there are more than 1,000 models. - The list of supported language pairs can be found [here](https://huggingface.co/Helsinki-NLP). - Models were originally trained by [Jörg Tiedemann](https://researchportal.helsinki.fi/en/persons/j%C3%B6rg-tiedemann) using the [Marian](https://marian-nmt.github.io/) C++ library, which supports fast training and translation. - All models are transformer encoder-decoders with 6 layers in each component. Each model's performance is documented in a model card. - The 80 opus models that require BPE preprocessing are not supported. - The modeling code is the same as [`BartForConditionalGeneration`] with a few minor modifications: - static (sinusoid) positional embeddings (`MarianConfig.static_position_embeddings=True`) - no layernorm_embedding (`MarianConfig.normalize_embedding=False`) - the model starts generating with `pad_token_id` (which has 0 as a token_embedding) as the prefix (Bart uses `<s/>`), - Code to bulk convert models can be found in `convert_marian_to_pytorch.py`. ## Naming - All model names use the following format: `Helsinki-NLP/opus-mt-{src}-{tgt}` - The language codes used to name models are inconsistent. Two digit codes can usually be found [here](https://developers.google.com/admin-sdk/directory/v1/languages), three digit codes require googling "language code {code}". - Codes formatted like `es_AR` are usually `code_{region}`. That one is Spanish from Argentina. - The models were converted in two stages. The first 1000 models use ISO-639-2 codes to identify languages, the second group use a combination of ISO-639-5 codes and ISO-639-2 codes. ## Examples - Since Marian models are smaller than many other translation models available in the library, they can be useful for fine-tuning experiments and integration tests. 
- [Fine-tune on GPU](https://github.com/huggingface/transformers/blob/master/examples/legacy/seq2seq/train_distil_marian_enro.sh) ## Multilingual Models - All model names use the following format: `Helsinki-NLP/opus-mt-{src}-{tgt}`: - If a model can output multiple languages, and you should specify a language code by prepending the desired output language to the `src_text`. - You can see a models's supported language codes in its model card, under target constituents, like in [opus-mt-en-roa](https://huggingface.co/Helsinki-NLP/opus-mt-en-roa). - Note that if a model is only multilingual on the source side, like `Helsinki-NLP/opus-mt-roa-en`, no language codes are required. New multi-lingual models from the [Tatoeba-Challenge repo](https://github.com/Helsinki-NLP/Tatoeba-Challenge) require 3 character language codes: ```python >>> from transformers import MarianMTModel, MarianTokenizer >>> src_text = [ ... ">>fra<< this is a sentence in english that we want to translate to french", ... ">>por<< This should go to portuguese", ... ">>esp<< And this to Spanish", ... ] >>> model_name = "Helsinki-NLP/opus-mt-en-roa" >>> tokenizer = MarianTokenizer.from_pretrained(model_name) >>> print(tokenizer.supported_language_codes) ['>>zlm_Latn<<', '>>mfe<<', '>>hat<<', '>>pap<<', '>>ast<<', '>>cat<<', '>>ind<<', '>>glg<<', '>>wln<<', '>>spa<<', '>>fra<<', '>>ron<<', '>>por<<', '>>ita<<', '>>oci<<', '>>arg<<', '>>min<<'] >>> model = MarianMTModel.from_pretrained(model_name) >>> translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True)) >>> [tokenizer.decode(t, skip_special_tokens=True) for t in translated] ["c'est une phrase en anglais que nous voulons traduire en français", 'Isto deve ir para o português.', 'Y esto al español'] ``` Here is the code to see all available pretrained models on the hub: ```python from huggingface_hub import list_models model_list = list_models() org = "Helsinki-NLP" model_ids = [x.id for x in model_list if x.id.startswith(org)] suffix = [x.split("/")[1] for x in model_ids] old_style_multi_models = [f"{org}/{s}" for s in suffix if s != s.lower()] ``` ## Old Style Multi-Lingual Models These are the old style multi-lingual models ported from the OPUS-MT-Train repo: and the members of each language group: ```python no-style ['Helsinki-NLP/opus-mt-NORTH_EU-NORTH_EU', 'Helsinki-NLP/opus-mt-ROMANCE-en', 'Helsinki-NLP/opus-mt-SCANDINAVIA-SCANDINAVIA', 'Helsinki-NLP/opus-mt-de-ZH', 'Helsinki-NLP/opus-mt-en-CELTIC', 'Helsinki-NLP/opus-mt-en-ROMANCE', 'Helsinki-NLP/opus-mt-es-NORWAY', 'Helsinki-NLP/opus-mt-fi-NORWAY', 'Helsinki-NLP/opus-mt-fi-ZH', 'Helsinki-NLP/opus-mt-fi_nb_no_nn_ru_sv_en-SAMI', 'Helsinki-NLP/opus-mt-sv-NORWAY', 'Helsinki-NLP/opus-mt-sv-ZH'] GROUP_MEMBERS = { 'ZH': ['cmn', 'cn', 'yue', 'ze_zh', 'zh_cn', 'zh_CN', 'zh_HK', 'zh_tw', 'zh_TW', 'zh_yue', 'zhs', 'zht', 'zh'], 'ROMANCE': ['fr', 'fr_BE', 'fr_CA', 'fr_FR', 'wa', 'frp', 'oc', 'ca', 'rm', 'lld', 'fur', 'lij', 'lmo', 'es', 'es_AR', 'es_CL', 'es_CO', 'es_CR', 'es_DO', 'es_EC', 'es_ES', 'es_GT', 'es_HN', 'es_MX', 'es_NI', 'es_PA', 'es_PE', 'es_PR', 'es_SV', 'es_UY', 'es_VE', 'pt', 'pt_br', 'pt_BR', 'pt_PT', 'gl', 'lad', 'an', 'mwl', 'it', 'it_IT', 'co', 'nap', 'scn', 'vec', 'sc', 'ro', 'la'], 'NORTH_EU': ['de', 'nl', 'fy', 'af', 'da', 'fo', 'is', 'no', 'nb', 'nn', 'sv'], 'SCANDINAVIA': ['da', 'fo', 'is', 'no', 'nb', 'nn', 'sv'], 'SAMI': ['se', 'sma', 'smj', 'smn', 'sms'], 'NORWAY': ['nb_NO', 'nb', 'nn_NO', 'nn', 'nog', 'no_nb', 'no'], 'CELTIC': ['ga', 'cy', 'br', 'gd', 'kw', 'gv'] 
} ``` Example of translating english to many romance languages, using old-style 2 character language codes ```python >>> from transformers import MarianMTModel, MarianTokenizer >>> src_text = [ ... ">>fr<< this is a sentence in english that we want to translate to french", ... ">>pt<< This should go to portuguese", ... ">>es<< And this to Spanish", ... ] >>> model_name = "Helsinki-NLP/opus-mt-en-ROMANCE" >>> tokenizer = MarianTokenizer.from_pretrained(model_name) >>> model = MarianMTModel.from_pretrained(model_name) >>> translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True)) >>> tgt_text = [tokenizer.decode(t, skip_special_tokens=True) for t in translated] ["c'est une phrase en anglais que nous voulons traduire en français", 'Isto deve ir para o português.', 'Y esto al español'] ``` ## Resources - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) - [Causal language modeling task guide](../tasks/language_modeling) ## MarianConfig [[autodoc]] MarianConfig ## MarianTokenizer [[autodoc]] MarianTokenizer - build_inputs_with_special_tokens <frameworkcontent> <pt> ## MarianModel [[autodoc]] MarianModel - forward ## MarianMTModel [[autodoc]] MarianMTModel - forward ## MarianForCausalLM [[autodoc]] MarianForCausalLM - forward </pt> <tf> ## TFMarianModel [[autodoc]] TFMarianModel - call ## TFMarianMTModel [[autodoc]] TFMarianMTModel - call </tf> <jax> ## FlaxMarianModel [[autodoc]] FlaxMarianModel - __call__ ## FlaxMarianMTModel [[autodoc]] FlaxMarianMTModel - __call__ </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/marian.md/0
{ "file_path": "transformers/docs/source/en/model_doc/marian.md", "repo_id": "transformers", "token_count": 3062 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # MMS ## Overview The MMS model was proposed in [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) by Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli The abstract from the paper is the following: *Expanding the language coverage of speech technology has the potential to improve access to information for many more people. However, current speech technology is restricted to about one hundred languages which is a small fraction of the over 7,000 languages spoken around the world. The Massively Multilingual Speech (MMS) project increases the number of supported languages by 10-40x, depending on the task. The main ingredients are a new dataset based on readings of publicly available religious texts and effectively leveraging self-supervised learning. We built pre-trained wav2vec 2.0 models covering 1,406 languages, a single multilingual automatic speech recognition model for 1,107 languages, speech synthesis models for the same number of languages, as well as a language identification model for 4,017 languages. Experiments show that our multilingual speech recognition model more than halves the word error rate of Whisper on 54 languages of the FLEURS benchmark while being trained on a small fraction of the labeled data.* Here are the different models open sourced in the MMS project. The models and code are originally released [here](https://github.com/facebookresearch/fairseq/tree/main/examples/mms). We have add them to the `transformers` framework, making them easier to use. ### Automatic Speech Recognition (ASR) The ASR model checkpoints can be found here : [mms-1b-fl102](https://huggingface.co/facebook/mms-1b-fl102), [mms-1b-l1107](https://huggingface.co/facebook/mms-1b-l1107), [mms-1b-all](https://huggingface.co/facebook/mms-1b-all). For best accuracy, use the `mms-1b-all` model. Tips: - All ASR models accept a float array corresponding to the raw waveform of the speech signal. The raw waveform should be pre-processed with [`Wav2Vec2FeatureExtractor`]. - The models were trained using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. - You can load different language adapter weights for different languages via [`~Wav2Vec2PreTrainedModel.load_adapter`]. Language adapters only consists of roughly 2 million parameters and can therefore be efficiently loaded on the fly when needed. #### Loading By default MMS loads adapter weights for English. 
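As a minimal sketch, loading the checkpoint without any extra arguments keeps the default English (`"eng"`) adapter:

```py
from transformers import Wav2Vec2ForCTC, AutoProcessor

model_id = "facebook/mms-1b-all"

# No `target_lang` is passed, so the English adapter weights are used.
processor = AutoProcessor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)
```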
If you want to load adapter weights of another language make sure to specify `target_lang=<your-chosen-target-lang>` as well as `"ignore_mismatched_sizes=True`. The `ignore_mismatched_sizes=True` keyword has to be passed to allow the language model head to be resized according to the vocabulary of the specified language. Similarly, the processor should be loaded with the same target language ```py from transformers import Wav2Vec2ForCTC, AutoProcessor model_id = "facebook/mms-1b-all" target_lang = "fra" processor = AutoProcessor.from_pretrained(model_id, target_lang=target_lang) model = Wav2Vec2ForCTC.from_pretrained(model_id, target_lang=target_lang, ignore_mismatched_sizes=True) ``` <Tip> You can safely ignore a warning such as: ```text Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at facebook/mms-1b-all and are newly initialized because the shapes did not match: - lm_head.bias: found shape torch.Size([154]) in the checkpoint and torch.Size([314]) in the model instantiated - lm_head.weight: found shape torch.Size([154, 1280]) in the checkpoint and torch.Size([314, 1280]) in the model instantiated You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` </Tip> If you want to use the ASR pipeline, you can load your chosen target language as such: ```py from transformers import pipeline model_id = "facebook/mms-1b-all" target_lang = "fra" pipe = pipeline(model=model_id, model_kwargs={"target_lang": "fra", "ignore_mismatched_sizes": True}) ``` #### Inference Next, let's look at how we can run MMS in inference and change adapter layers after having called [`~PretrainedModel.from_pretrained`] First, we load audio data in different languages using the [Datasets](https://github.com/huggingface/datasets). ```py from datasets import load_dataset, Audio # English stream_data = load_dataset("mozilla-foundation/common_voice_13_0", "en", split="test", streaming=True) stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000)) en_sample = next(iter(stream_data))["audio"]["array"] # French stream_data = load_dataset("mozilla-foundation/common_voice_13_0", "fr", split="test", streaming=True) stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000)) fr_sample = next(iter(stream_data))["audio"]["array"] ``` Next, we load the model and processor ```py from transformers import Wav2Vec2ForCTC, AutoProcessor import torch model_id = "facebook/mms-1b-all" processor = AutoProcessor.from_pretrained(model_id) model = Wav2Vec2ForCTC.from_pretrained(model_id) ``` Now we process the audio data, pass the processed audio data to the model and transcribe the model output, just like we usually do for [`Wav2Vec2ForCTC`]. ```py inputs = processor(en_sample, sampling_rate=16_000, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs).logits ids = torch.argmax(outputs, dim=-1)[0] transcription = processor.decode(ids) # 'joe keton disapproved of films and buster also had reservations about the media' ``` We can now keep the same model in memory and simply switch out the language adapters by calling the convenient [`~Wav2Vec2ForCTC.load_adapter`] function for the model and [`~Wav2Vec2CTCTokenizer.set_target_lang`] for the tokenizer. We pass the target language as an input - `"fra"` for French. 
```py processor.tokenizer.set_target_lang("fra") model.load_adapter("fra") inputs = processor(fr_sample, sampling_rate=16_000, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs).logits ids = torch.argmax(outputs, dim=-1)[0] transcription = processor.decode(ids) # "ce dernier est volé tout au long de l'histoire romaine" ``` In the same way the language can be switched out for all other supported languages. Please have a look at: ```py processor.tokenizer.vocab.keys() ``` to see all supported languages. To further improve performance from ASR models, language model decoding can be used. See the documentation [here](https://huggingface.co/facebook/mms-1b-all) for further details. ### Speech Synthesis (TTS) MMS-TTS uses the same model architecture as VITS, which was added to 🀗 Transformers in v4.33. MMS trains a separate model checkpoint for each of the 1100+ languages in the project. All available checkpoints can be found on the Hugging Face Hub: [facebook/mms-tts](https://huggingface.co/models?sort=trending&search=facebook%2Fmms-tts), and the inference documentation under [VITS](https://huggingface.co/docs/transformers/main/en/model_doc/vits). #### Inference To use the MMS model, first update to the latest version of the Transformers library: ```bash pip install --upgrade transformers accelerate ``` Since the flow-based model in VITS is non-deterministic, it is good practice to set a seed to ensure reproducibility of the outputs. - For languages with a Roman alphabet, such as English or French, the tokenizer can be used directly to pre-process the text inputs. The following code example runs a forward pass using the MMS-TTS English checkpoint: ```python import torch from transformers import VitsTokenizer, VitsModel, set_seed tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng") model = VitsModel.from_pretrained("facebook/mms-tts-eng") inputs = tokenizer(text="Hello - my dog is cute", return_tensors="pt") set_seed(555) # make deterministic with torch.no_grad(): outputs = model(**inputs) waveform = outputs.waveform[0] ``` The resulting waveform can be saved as a `.wav` file: ```python import scipy scipy.io.wavfile.write("synthesized_speech.wav", rate=model.config.sampling_rate, data=waveform) ``` Or displayed in a Jupyter Notebook / Google Colab: ```python from IPython.display import Audio Audio(waveform, rate=model.config.sampling_rate) ``` For certain languages with non-Roman alphabets, such as Arabic, Mandarin or Hindi, the [`uroman`](https://github.com/isi-nlp/uroman) perl package is required to pre-process the text inputs to the Roman alphabet. You can check whether you require the `uroman` package for your language by inspecting the `is_uroman` attribute of the pre-trained `tokenizer`: ```python from transformers import VitsTokenizer tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng") print(tokenizer.is_uroman) ``` If required, you should apply the uroman package to your text inputs **prior** to passing them to the `VitsTokenizer`, since currently the tokenizer does not support performing the pre-processing itself. To do this, first clone the uroman repository to your local machine and set the bash variable `UROMAN` to the local path: ```bash git clone https://github.com/isi-nlp/uroman.git cd uroman export UROMAN=$(pwd) ``` You can then pre-process the text input using the following code snippet. 
You can either rely on using the bash variable `UROMAN` to point to the uroman repository, or you can pass the uroman directory as an argument to the `uromanize` function: ```python import torch from transformers import VitsTokenizer, VitsModel, set_seed import os import subprocess tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-kor") model = VitsModel.from_pretrained("facebook/mms-tts-kor") def uromanize(input_string, uroman_path): """Convert non-Roman strings to Roman using the `uroman` perl package.""" script_path = os.path.join(uroman_path, "bin", "uroman.pl") command = ["perl", script_path] process = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Execute the perl command stdout, stderr = process.communicate(input=input_string.encode()) if process.returncode != 0: raise ValueError(f"Error {process.returncode}: {stderr.decode()}") # Return the output as a string and skip the new-line character at the end return stdout.decode()[:-1] text = "읎뎐 묎슚 음읎알" uromanized_text = uromanize(text, uroman_path=os.environ["UROMAN"]) inputs = tokenizer(text=uromanized_text, return_tensors="pt") set_seed(555) # make deterministic with torch.no_grad(): outputs = model(inputs["input_ids"]) waveform = outputs.waveform[0] ``` **Tips:** * The MMS-TTS checkpoints are trained on lower-cased, un-punctuated text. By default, the `VitsTokenizer` *normalizes* the inputs by removing any casing and punctuation, to avoid passing out-of-vocabulary characters to the model. Hence, the model is agnostic to casing and punctuation, so these should be avoided in the text prompt. You can disable normalisation by setting `normalize=False` in the call to the tokenizer, but this will lead to un-expected behaviour and is discouraged. * The speaking rate can be varied by setting the attribute `model.speaking_rate` to a chosen value. Likewise, the randomness of the noise is controlled by `model.noise_scale`: ```python import torch from transformers import VitsTokenizer, VitsModel, set_seed tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng") model = VitsModel.from_pretrained("facebook/mms-tts-eng") inputs = tokenizer(text="Hello - my dog is cute", return_tensors="pt") # make deterministic set_seed(555) # make speech faster and more noisy model.speaking_rate = 1.5 model.noise_scale = 0.8 with torch.no_grad(): outputs = model(**inputs) ``` ### Language Identification (LID) Different LID models are available based on the number of languages they can recognize - [126](https://huggingface.co/facebook/mms-lid-126), [256](https://huggingface.co/facebook/mms-lid-256), [512](https://huggingface.co/facebook/mms-lid-512), [1024](https://huggingface.co/facebook/mms-lid-1024), [2048](https://huggingface.co/facebook/mms-lid-2048), [4017](https://huggingface.co/facebook/mms-lid-4017). #### Inference First, we install transformers and some other libraries ```bash pip install torch accelerate datasets[audio] pip install --upgrade transformers ```` Next, we load a couple of audio samples via `datasets`. Make sure that the audio data is sampled to 16000 kHz. 
```py from datasets import load_dataset, Audio # English stream_data = load_dataset("mozilla-foundation/common_voice_13_0", "en", split="test", streaming=True) stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000)) en_sample = next(iter(stream_data))["audio"]["array"] # Arabic stream_data = load_dataset("mozilla-foundation/common_voice_13_0", "ar", split="test", streaming=True) stream_data = stream_data.cast_column("audio", Audio(sampling_rate=16000)) ar_sample = next(iter(stream_data))["audio"]["array"] ``` Next, we load the model and processor ```py from transformers import Wav2Vec2ForSequenceClassification, AutoFeatureExtractor import torch model_id = "facebook/mms-lid-126" processor = AutoFeatureExtractor.from_pretrained(model_id) model = Wav2Vec2ForSequenceClassification.from_pretrained(model_id) ``` Now we process the audio data, pass the processed audio data to the model to classify it into a language, just like we usually do for Wav2Vec2 audio classification models such as [ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition](https://huggingface.co/harshit345/xlsr-wav2vec-speech-emotion-recognition) ```py # English inputs = processor(en_sample, sampling_rate=16_000, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs).logits lang_id = torch.argmax(outputs, dim=-1)[0].item() detected_lang = model.config.id2label[lang_id] # 'eng' # Arabic inputs = processor(ar_sample, sampling_rate=16_000, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs).logits lang_id = torch.argmax(outputs, dim=-1)[0].item() detected_lang = model.config.id2label[lang_id] # 'ara' ``` To see all the supported languages of a checkpoint, you can print out the language ids as follows: ```py processor.id2label.values() ``` ### Audio Pretrained Models Pretrained models are available for two different sizes - [300M](https://huggingface.co/facebook/mms-300m) , [1Bil](https://huggingface.co/facebook/mms-1b). <Tip> The MMS for ASR architecture is based on the Wav2Vec2 model, refer to [Wav2Vec2's documentation page](wav2vec2) for further details on how to finetune with models for various downstream tasks. MMS-TTS uses the same model architecture as VITS, refer to [VITS's documentation page](vits) for API reference. </Tip>
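As a rough sketch of using these pretrained checkpoints directly, the snippet below extracts frame-level speech representations with the plain Wav2Vec2 backbone. The random waveform is only a stand-in for real 16 kHz audio, and the sketch assumes the checkpoint ships a preprocessor config that `AutoFeatureExtractor` can load.

```py
import torch
from transformers import AutoFeatureExtractor, Wav2Vec2Model

model_id = "facebook/mms-300m"

feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
model = Wav2Vec2Model.from_pretrained(model_id)

# Stand-in for one second of real speech sampled at 16 kHz.
waveform = torch.randn(16_000).numpy()

inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state

print(hidden_states.shape)  # (batch, frames, hidden_size)
```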
transformers/docs/source/en/model_doc/mms.md/0
{ "file_path": "transformers/docs/source/en/model_doc/mms.md", "repo_id": "transformers", "token_count": 4924 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # RemBERT ## Overview The RemBERT model was proposed in [Rethinking Embedding Coupling in Pre-trained Language Models](https://arxiv.org/abs/2010.12821) by Hyung Won Chung, Thibault Févry, Henry Tsai, Melvin Johnson, Sebastian Ruder. The abstract from the paper is the following: *We re-evaluate the standard practice of sharing weights between input and output embeddings in state-of-the-art pre-trained language models. We show that decoupled embeddings provide increased modeling flexibility, allowing us to significantly improve the efficiency of parameter allocation in the input embedding of multilingual models. By reallocating the input embedding parameters in the Transformer layers, we achieve dramatically better performance on standard natural language understanding tasks with the same number of parameters during fine-tuning. We also show that allocating additional capacity to the output embedding provides benefits to the model that persist through the fine-tuning stage even though the output embedding is discarded after pre-training. Our analysis shows that larger output embeddings prevent the model's last layers from overspecializing to the pre-training task and encourage Transformer representations to be more general and more transferable to other tasks and languages. Harnessing these findings, we are able to train models that achieve strong performance on the XTREME benchmark without increasing the number of parameters at the fine-tuning stage.* ## Usage tips For fine-tuning, RemBERT can be thought of as a bigger version of mBERT with an ALBERT-like factorization of the embedding layer. The embeddings are not tied in pre-training, in contrast with BERT, which enables smaller input embeddings (preserved during fine-tuning) and bigger output embeddings (discarded at fine-tuning). The tokenizer is also similar to the Albert one rather than the BERT one. 
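As a small illustration, the sketch below runs the RemBERT encoder on a single sentence. The `google/rembert` checkpoint is used for illustration, and the tokenizer requires the `sentencepiece` package.

```python
import torch
from transformers import AutoTokenizer, RemBertModel

# `google/rembert` is assumed here; its SentencePiece-based tokenizer mirrors ALBERT's.
tokenizer = AutoTokenizer.from_pretrained("google/rembert")
model = RemBertModel.from_pretrained("google/rembert")

inputs = tokenizer("RemBERT decouples the input and output embeddings.", return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)
```

For fine-tuning, the task-specific heads ([`RemBertForSequenceClassification`], [`RemBertForTokenClassification`], etc.) follow the usual Transformers API.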
## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## RemBertConfig [[autodoc]] RemBertConfig ## RemBertTokenizer [[autodoc]] RemBertTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## RemBertTokenizerFast [[autodoc]] RemBertTokenizerFast - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary <frameworkcontent> <pt> ## RemBertModel [[autodoc]] RemBertModel - forward ## RemBertForCausalLM [[autodoc]] RemBertForCausalLM - forward ## RemBertForMaskedLM [[autodoc]] RemBertForMaskedLM - forward ## RemBertForSequenceClassification [[autodoc]] RemBertForSequenceClassification - forward ## RemBertForMultipleChoice [[autodoc]] RemBertForMultipleChoice - forward ## RemBertForTokenClassification [[autodoc]] RemBertForTokenClassification - forward ## RemBertForQuestionAnswering [[autodoc]] RemBertForQuestionAnswering - forward </pt> <tf> ## TFRemBertModel [[autodoc]] TFRemBertModel - call ## TFRemBertForMaskedLM [[autodoc]] TFRemBertForMaskedLM - call ## TFRemBertForCausalLM [[autodoc]] TFRemBertForCausalLM - call ## TFRemBertForSequenceClassification [[autodoc]] TFRemBertForSequenceClassification - call ## TFRemBertForMultipleChoice [[autodoc]] TFRemBertForMultipleChoice - call ## TFRemBertForTokenClassification [[autodoc]] TFRemBertForTokenClassification - call ## TFRemBertForQuestionAnswering [[autodoc]] TFRemBertForQuestionAnswering - call </tf> </frameworkcontent>
transformers/docs/source/en/model_doc/rembert.md/0
{ "file_path": "transformers/docs/source/en/model_doc/rembert.md", "repo_id": "transformers", "token_count": 1363 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ViTMAE ## Overview The ViTMAE model was proposed in [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377v2) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick. The paper shows that, by pre-training a Vision Transformer (ViT) to reconstruct pixel values for masked patches, one can get results after fine-tuning that outperform supervised pre-training. The abstract from the paper is the following: *This paper shows that masked autoencoders (MAE) are scalable self-supervised learners for computer vision. Our MAE approach is simple: we mask random patches of the input image and reconstruct the missing pixels. It is based on two core designs. First, we develop an asymmetric encoder-decoder architecture, with an encoder that operates only on the visible subset of patches (without mask tokens), along with a lightweight decoder that reconstructs the original image from the latent representation and mask tokens. Second, we find that masking a high proportion of the input image, e.g., 75%, yields a nontrivial and meaningful self-supervisory task. Coupling these two designs enables us to train large models efficiently and effectively: we accelerate training (by 3x or more) and improve accuracy. Our scalable approach allows for learning high-capacity models that generalize well: e.g., a vanilla ViT-Huge model achieves the best accuracy (87.8%) among methods that use only ImageNet-1K data. Transfer performance in downstream tasks outperforms supervised pre-training and shows promising scaling behavior.* <img src="https://user-images.githubusercontent.com/11435359/146857310-f258c86c-fde6-48e8-9cee-badd2b21bd2c.png" alt="drawing" width="600"/> <small> MAE architecture. Taken from the <a href="https://arxiv.org/abs/2111.06377">original paper.</a> </small> This model was contributed by [nielsr](https://huggingface.co/nielsr). TensorFlow version of the model was contributed by [sayakpaul](https://github.com/sayakpaul) and [ariG23498](https://github.com/ariG23498) (equal contribution). The original code can be found [here](https://github.com/facebookresearch/mae). ## Usage tips - MAE (masked auto encoding) is a method for self-supervised pre-training of Vision Transformers (ViTs). The pre-training objective is relatively simple: by masking a large portion (75%) of the image patches, the model must reconstruct raw pixel values. One can use [`ViTMAEForPreTraining`] for this purpose. - After pre-training, one "throws away" the decoder used to reconstruct pixels, and one uses the encoder for fine-tuning/linear probing. This means that after fine-tuning, one can directly plug in the weights into a [`ViTForImageClassification`]. - One can use [`ViTImageProcessor`] to prepare images for the model. 
See the code examples for more info.

- Note that the encoder of MAE is only used to encode the visual patches. The encoded patches are then concatenated with mask tokens, which the decoder (which also consists of Transformer blocks) takes as input. Each mask token is a shared, learned vector that indicates the presence of a missing patch to be predicted. Fixed sin/cos position embeddings are added both to the input of the encoder and the decoder.
- For a visual understanding of how MAEs work you can check out this [post](https://keras.io/examples/vision/masked_image_modeling/).

### Using Scaled Dot Product Attention (SDPA)

PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the [official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention) page for more information.

SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set `attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.

```python
import torch
from transformers import ViTMAEModel

model = ViTMAEModel.from_pretrained("facebook/vit-mae-base", attn_implementation="sdpa", torch_dtype=torch.float16)
...
```

For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).

On a local benchmark (A100-40GB, PyTorch 2.3.0, OS Ubuntu 22.04) with `float32` and `facebook/vit-mae-base` model, we saw the following speedups during inference.

| Batch size | Average inference time (ms), eager mode | Average inference time (ms), sdpa mode | Speed up, Sdpa / Eager (x) |
|--------------|-------------------------------------------|-------------------------------------------|------------------------------|
| 1 | 11 | 6 | 1.83 |
| 2 | 8 | 6 | 1.33 |
| 4 | 8 | 6 | 1.33 |
| 8 | 8 | 6 | 1.33 |

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ViTMAE.

- [`ViTMAEForPreTraining`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining), allowing you to pre-train the model from scratch/further pre-train the model on custom data.
- A notebook that illustrates how to visualize reconstructed pixel values with [`ViTMAEForPreTraining`] can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/ViTMAE/ViT_MAE_visualization_demo.ipynb).

If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.

## ViTMAEConfig

[[autodoc]] ViTMAEConfig

<frameworkcontent>
<pt>

## ViTMAEModel

[[autodoc]] ViTMAEModel
    - forward

## ViTMAEForPreTraining

[[autodoc]] transformers.ViTMAEForPreTraining
    - forward

</pt>
<tf>

## TFViTMAEModel

[[autodoc]] TFViTMAEModel
    - call

## TFViTMAEForPreTraining

[[autodoc]] transformers.TFViTMAEForPreTraining
    - call

</tf>
</frameworkcontent>
transformers/docs/source/en/model_doc/vit_mae.md/0
{ "file_path": "transformers/docs/source/en/model_doc/vit_mae.md", "repo_id": "transformers", "token_count": 2432 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # XLM-RoBERTa-XL ## Overview The XLM-RoBERTa-XL model was proposed in [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau. The abstract from the paper is the following: *Recent work has demonstrated the effectiveness of cross-lingual language model pretraining for cross-lingual understanding. In this study, we present the results of two larger multilingual masked language models, with 3.5B and 10.7B parameters. Our two new models dubbed XLM-R XL and XLM-R XXL outperform XLM-R by 1.8% and 2.4% average accuracy on XNLI. Our model also outperforms the RoBERTa-Large model on several English tasks of the GLUE benchmark by 0.3% on average while handling 99 more languages. This suggests pretrained models with larger capacity may obtain both strong performance on high-resource languages while greatly improving low-resource languages. We make our code and models publicly available.* This model was contributed by [Soonhwan-Kwon](https://github.com/Soonhwan-Kwon) and [stefan-it](https://huggingface.co/stefan-it). The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/xlmr). ## Usage tips XLM-RoBERTa-XL is a multilingual model trained on 100 different languages. Unlike some XLM multilingual models, it does not require `lang` tensors to understand which language is used, and should be able to determine the correct language from the input ids. ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## XLMRobertaXLConfig [[autodoc]] XLMRobertaXLConfig ## XLMRobertaXLModel [[autodoc]] XLMRobertaXLModel - forward ## XLMRobertaXLForCausalLM [[autodoc]] XLMRobertaXLForCausalLM - forward ## XLMRobertaXLForMaskedLM [[autodoc]] XLMRobertaXLForMaskedLM - forward ## XLMRobertaXLForSequenceClassification [[autodoc]] XLMRobertaXLForSequenceClassification - forward ## XLMRobertaXLForMultipleChoice [[autodoc]] XLMRobertaXLForMultipleChoice - forward ## XLMRobertaXLForTokenClassification [[autodoc]] XLMRobertaXLForTokenClassification - forward ## XLMRobertaXLForQuestionAnswering [[autodoc]] XLMRobertaXLForQuestionAnswering - forward
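As a rough illustration of the usage tip above, the sketch below fills a masked token without passing any `lang` tensors; the `facebook/xlm-roberta-xl` checkpoint name and the French example sentence are assumptions chosen for illustration:

```python
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("facebook/xlm-roberta-xl")
model = AutoModelForMaskedLM.from_pretrained("facebook/xlm-roberta-xl")

# No `lang` tensor is needed; the model infers the language from the input ids.
inputs = tokenizer("Paris est la capitale de la <mask>.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Pick the highest-scoring token at each masked position.
mask_positions = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))
```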
transformers/docs/source/en/model_doc/xlm-roberta-xl.md/0
{ "file_path": "transformers/docs/source/en/model_doc/xlm-roberta-xl.md", "repo_id": "transformers", "token_count": 969 }
<!--- Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Performance and Scalability Training large transformer models and deploying them to production present various challenges. During training, the model may require more GPU memory than available or exhibit slow training speed. In the deployment phase, the model can struggle to handle the required throughput in a production environment. This documentation aims to assist you in overcoming these challenges and finding the optimal settings for your use-case. The guides are divided into training and inference sections, as each comes with different challenges and solutions. Within each section you'll find separate guides for different hardware configurations, such as single GPU vs. multi-GPU for training or CPU vs. GPU for inference. Use this document as your starting point to navigate further to the methods that match your scenario. ## Training Training large transformer models efficiently requires an accelerator such as a GPU or TPU. The most common case is where you have a single GPU. The methods that you can apply to improve training efficiency on a single GPU extend to other setups such as multiple GPU. However, there are also techniques that are specific to multi-GPU or CPU training. We cover them in separate sections. * [Methods and tools for efficient training on a single GPU](perf_train_gpu_one): start here to learn common approaches that can help optimize GPU memory utilization, speed up the training, or both. * [Multi-GPU training section](perf_train_gpu_many): explore this section to learn about further optimization methods that apply to a multi-GPU settings, such as data, tensor, and pipeline parallelism. * [CPU training section](perf_train_cpu): learn about mixed precision training on CPU. * [Efficient Training on Multiple CPUs](perf_train_cpu_many): learn about distributed CPU training. * [Training on TPU with TensorFlow](perf_train_tpu_tf): if you are new to TPUs, refer to this section for an opinionated introduction to training on TPUs and using XLA. * [Custom hardware for training](perf_hardware): find tips and tricks when building your own deep learning rig. * [Hyperparameter Search using Trainer API](hpo_train) ## Inference Efficient inference with large models in a production environment can be as challenging as training them. In the following sections we go through the steps to run inference on CPU and single/multi-GPU setups. * [Inference on a single CPU](perf_infer_cpu) * [Inference on a single GPU](perf_infer_gpu_one) * [Multi-GPU inference](perf_infer_gpu_multi) * [XLA Integration for TensorFlow Models](tf_xla) ## Training and inference Here you'll find techniques, tips and tricks that apply whether you are training a model, or running inference with it. 
* [Instantiating a big model](big_models)
* [Troubleshooting performance issues](debugging)

## Contribute

This document is far from complete, and a lot more needs to be added. If you have additions or corrections to make, please don't hesitate to open a PR; if you aren't sure, start an Issue and we can discuss the details there.

When making contributions claiming that A is better than B, please try to include a reproducible benchmark and/or a link to the source of that information (unless it comes directly from you).
transformers/docs/source/en/performance.md/0
{ "file_path": "transformers/docs/source/en/performance.md", "repo_id": "transformers", "token_count": 966 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# HIGGS

HIGGS is a zero-shot quantization algorithm that combines Hadamard preprocessing with MSE-optimal quantization grids to achieve lower quantization error and SOTA performance. You can find more information in the paper [arxiv.org/abs/2411.17525](https://arxiv.org/abs/2411.17525).

Runtime support for HIGGS is implemented through [FLUTE](https://arxiv.org/abs/2407.10960) and its [library](https://github.com/HanGuo97/flute).

## Quantization Example

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, HiggsConfig

model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2-9b-it",
    quantization_config=HiggsConfig(bits=4),
    device_map="auto",
)

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b-it")

tokenizer.decode(model.generate(
    **tokenizer("Hi,", return_tensors="pt").to(model.device),
    temperature=0.5,
    top_p=0.80,
)[0])
```

## Pre-quantized models

Some pre-quantized models can be found in the [official collection](https://huggingface.co/collections/ISTA-DASLab/higgs-675308e432fd56b7f6dab94e) on Hugging Face Hub.

## Current Limitations

**Architectures**

Currently, FLUTE, and HIGGS by extension, **only support Llama 3.1 and 3.0 models with 8B, 70B and 405B parameters, as well as Gemma-2 9B and 27B**. We're working on supporting a more diverse set of models, as well as arbitrary models, by modifying the FLUTE compilation procedure.

**torch.compile**

HIGGS is fully compatible with `torch.compile`. Compiling `model.forward`, as described [here](../perf_torch_compile.md), yields the following speedups on an RTX 4090 for `Llama-3.1-8B-Instruct` (forward passes/sec):

| Batch Size | BF16 (With `torch.compile`) | HIGGS 4bit (No `torch.compile`) | HIGGS 4bit (With `torch.compile`) |
|------------|-----------------------------|----------------------------------|-----------------------------------|
| 1          | 59                          | 41                               | 124                               |
| 4          | 57                          | 42                               | 123                               |
| 16         | 56                          | 41                               | 120                               |

**Quantized training**

Currently, HIGGS doesn't support quantized training (and backward passes in general). We're working on adding support for it.
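To make the `torch.compile` compatibility above concrete, here is a minimal sketch that compiles `model.forward` for a HIGGS-quantized model; the `mode` and `fullgraph` options are illustrative choices rather than required settings:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, HiggsConfig

model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2-9b-it",
    quantization_config=HiggsConfig(bits=4),
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b-it")

# Compile only the forward pass; the first few calls are slow while the graph compiles.
model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)

inputs = tokenizer("Hi,", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0]))
```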
transformers/docs/source/en/quantization/higgs.md/0
{ "file_path": "transformers/docs/source/en/quantization/higgs.md", "repo_id": "transformers", "token_count": 1149 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Image captioning

[[open-in-colab]]

Image captioning is the task of predicting a caption for a given image. Common real-world applications include aiding visually impaired people in navigating different situations. Image captioning therefore helps to improve content accessibility by describing images to people.

This guide will show you how to:

* Fine-tune an image captioning model.
* Use the fine-tuned model for inference.

Before you begin, make sure you have all the necessary libraries installed:

```bash
pip install transformers datasets evaluate -q
pip install jiwer -q
```

We encourage you to log in to your Hugging Face account so you can upload and share your model with the community. When prompted, enter your token to log in:

```python
from huggingface_hub import notebook_login

notebook_login()
```

## Load the Pokémon BLIP captions dataset

Use the 🤗 Datasets library to load a dataset that consists of {image-caption} pairs. To create your own image captioning dataset in PyTorch, you can follow [this notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/GIT/Fine_tune_GIT_on_an_image_captioning_dataset.ipynb).

```python
from datasets import load_dataset

ds = load_dataset("lambdalabs/pokemon-blip-captions")
ds
```
```bash
DatasetDict({
    train: Dataset({
        features: ['image', 'text'],
        num_rows: 833
    })
})
```

The dataset has two features, `image` and `text`.

<Tip>

Many image captioning datasets contain multiple captions per image. In those cases, a common strategy is to randomly sample a caption amongst the available ones during training.

</Tip>

Split the dataset's train split into a train and test set with the [`~datasets.Dataset.train_test_split`] method:

```python
ds = ds["train"].train_test_split(test_size=0.1)
train_ds = ds["train"]
test_ds = ds["test"]
```

Let's visualize a couple of samples from the training set.
```python from textwrap import wrap import matplotlib.pyplot as plt import numpy as np def plot_images(images, captions): plt.figure(figsize=(20, 20)) for i in range(len(images)): ax = plt.subplot(1, len(images), i + 1) caption = captions[i] caption = "\n".join(wrap(caption, 12)) plt.title(caption) plt.imshow(images[i]) plt.axis("off") sample_images_to_visualize = [np.array(train_ds[i]["image"]) for i in range(5)] sample_captions = [train_ds[i]["text"] for i in range(5)] plot_images(sample_images_to_visualize, sample_captions) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/sample_training_images_image_cap.png" alt="Sample training images"/> </div> ## Preprocess the dataset Since the dataset has two modalities (image and text), the pre-processing pipeline will preprocess images and the captions. To do so, load the processor class associated with the model you are about to fine-tune. ```python from transformers import AutoProcessor checkpoint = "microsoft/git-base" processor = AutoProcessor.from_pretrained(checkpoint) ``` The processor will internally pre-process the image (which includes resizing, and pixel scaling) and tokenize the caption. ```python def transforms(example_batch): images = [x for x in example_batch["image"]] captions = [x for x in example_batch["text"]] inputs = processor(images=images, text=captions, padding="max_length") inputs.update({"labels": inputs["input_ids"]}) return inputs train_ds.set_transform(transforms) test_ds.set_transform(transforms) ``` With the dataset ready, you can now set up the model for fine-tuning. ## Load a base model Load the ["microsoft/git-base"](https://huggingface.co/microsoft/git-base) into a [`AutoModelForCausalLM`](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoModelForCausalLM) object. ```python from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained(checkpoint) ``` ## Evaluate Image captioning models are typically evaluated with the [Rouge Score](https://huggingface.co/spaces/evaluate-metric/rouge) or [Word Error Rate](https://huggingface.co/spaces/evaluate-metric/wer). For this guide, you will use the Word Error Rate (WER). We use the 🀗 Evaluate library to do so. For potential limitations and other gotchas of the WER, refer to [this guide](https://huggingface.co/spaces/evaluate-metric/wer). ```python from evaluate import load import torch wer = load("wer") def compute_metrics(eval_pred): logits, labels = eval_pred predicted = logits.argmax(-1) decoded_labels = processor.batch_decode(labels, skip_special_tokens=True) decoded_predictions = processor.batch_decode(predicted, skip_special_tokens=True) wer_score = wer.compute(predictions=decoded_predictions, references=decoded_labels) return {"wer_score": wer_score} ``` ## Train! Now, you are ready to start fine-tuning the model. You will use the 🀗 [`Trainer`] for this. First, define the training arguments using [`TrainingArguments`]. 
```python from transformers import TrainingArguments, Trainer model_name = checkpoint.split("/")[1] training_args = TrainingArguments( output_dir=f"{model_name}-pokemon", learning_rate=5e-5, num_train_epochs=50, fp16=True, per_device_train_batch_size=32, per_device_eval_batch_size=32, gradient_accumulation_steps=2, save_total_limit=3, eval_strategy="steps", eval_steps=50, save_strategy="steps", save_steps=50, logging_steps=50, remove_unused_columns=False, push_to_hub=True, label_names=["labels"], load_best_model_at_end=True, ) ``` Then pass them along with the datasets and the model to 🀗 Trainer. ```python trainer = Trainer( model=model, args=training_args, train_dataset=train_ds, eval_dataset=test_ds, compute_metrics=compute_metrics, ) ``` To start training, simply call [`~Trainer.train`] on the [`Trainer`] object. ```python trainer.train() ``` You should see the training loss drop smoothly as training progresses. Once training is completed, share your model to the Hub with the [`~Trainer.push_to_hub`] method so everyone can use your model: ```python trainer.push_to_hub() ``` ## Inference Take a sample image from `test_ds` to test the model. ```python from PIL import Image import requests url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/test_image_image_cap.png" alt="Test image"/> </div> Prepare image for the model. ```python from accelerate.test_utils.testing import get_backend # automatically detects the underlying device type (CUDA, CPU, XPU, MPS, etc.) device, _, _ = get_backend() inputs = processor(images=image, return_tensors="pt").to(device) pixel_values = inputs.pixel_values ``` Call [`generate`] and decode the predictions. ```python generated_ids = model.generate(pixel_values=pixel_values, max_length=50) generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] print(generated_caption) ``` ```bash a drawing of a pink and blue pokemon ``` Looks like the fine-tuned model generated a pretty good caption!
transformers/docs/source/en/tasks/image_captioning.md/0
{ "file_path": "transformers/docs/source/en/tasks/image_captioning.md", "repo_id": "transformers", "token_count": 2730 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Export to TorchScript <Tip> This is the very beginning of our experiments with TorchScript and we are still exploring its capabilities with variable-input-size models. It is a focus of interest to us and we will deepen our analysis in upcoming releases, with more code examples, a more flexible implementation, and benchmarks comparing Python-based codes with compiled TorchScript. </Tip> According to the [TorchScript documentation](https://pytorch.org/docs/stable/jit.html): > TorchScript is a way to create serializable and optimizable models from PyTorch code. There are two PyTorch modules, [JIT and TRACE](https://pytorch.org/docs/stable/jit.html), that allow developers to export their models to be reused in other programs like efficiency-oriented C++ programs. We provide an interface that allows you to export 🀗 Transformers models to TorchScript so they can be reused in a different environment than PyTorch-based Python programs. Here, we explain how to export and use our models using TorchScript. Exporting a model requires two things: - model instantiation with the `torchscript` flag - a forward pass with dummy inputs These necessities imply several things developers should be careful about as detailed below. ## TorchScript flag and tied weights The `torchscript` flag is necessary because most of the 🀗 Transformers language models have tied weights between their `Embedding` layer and their `Decoding` layer. TorchScript does not allow you to export models that have tied weights, so it is necessary to untie and clone the weights beforehand. Models instantiated with the `torchscript` flag have their `Embedding` layer and `Decoding` layer separated, which means that they should not be trained down the line. Training would desynchronize the two layers, leading to unexpected results. This is not the case for models that do not have a language model head, as those do not have tied weights. These models can be safely exported without the `torchscript` flag. ## Dummy inputs and standard lengths The dummy inputs are used for a models forward pass. While the inputs' values are propagated through the layers, PyTorch keeps track of the different operations executed on each tensor. These recorded operations are then used to create the *trace* of the model. The trace is created relative to the inputs' dimensions. It is therefore constrained by the dimensions of the dummy input, and will not work for any other sequence length or batch size. When trying with a different size, the following error is raised: ``` `The expanded size of the tensor (3) must match the existing size (7) at non-singleton dimension 2` ``` We recommended you trace the model with a dummy input size at least as large as the largest input that will be fed to the model during inference. 
Padding can help fill the missing values. However, since the model is traced with a larger input size, the dimensions of the matrix will also be large, resulting in more calculations. Be careful of the total number of operations done on each input and follow the performance closely when exporting varying sequence-length models. ## Using TorchScript in Python This section demonstrates how to save and load models as well as how to use the trace for inference. ### Saving a model To export a `BertModel` with TorchScript, instantiate `BertModel` from the `BertConfig` class and then save it to disk under the filename `traced_bert.pt`: ```python from transformers import BertModel, BertTokenizer, BertConfig import torch enc = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") # Tokenizing input text text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]" tokenized_text = enc.tokenize(text) # Masking one of the input tokens masked_index = 8 tokenized_text[masked_index] = "[MASK]" indexed_tokens = enc.convert_tokens_to_ids(tokenized_text) segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1] # Creating a dummy input tokens_tensor = torch.tensor([indexed_tokens]) segments_tensors = torch.tensor([segments_ids]) dummy_input = [tokens_tensor, segments_tensors] # Initializing the model with the torchscript flag # Flag set to True even though it is not necessary as this model does not have an LM Head. config = BertConfig( vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, torchscript=True, ) # Instantiating the model model = BertModel(config) # The model needs to be in evaluation mode model.eval() # If you are instantiating the model with *from_pretrained* you can also easily set the TorchScript flag model = BertModel.from_pretrained("google-bert/bert-base-uncased", torchscript=True) # Creating the trace traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors]) torch.jit.save(traced_model, "traced_bert.pt") ``` ### Loading a model Now you can load the previously saved `BertModel`, `traced_bert.pt`, from disk and use it on the previously initialised `dummy_input`: ```python loaded_model = torch.jit.load("traced_bert.pt") loaded_model.eval() all_encoder_layers, pooled_output = loaded_model(*dummy_input) ``` ### Using a traced model for inference Use the traced model for inference by using its `__call__` dunder method: ```python traced_model(tokens_tensor, segments_tensors) ``` ## Deploy Hugging Face TorchScript models to AWS with the Neuron SDK AWS introduced the [Amazon EC2 Inf1](https://aws.amazon.com/ec2/instance-types/inf1/) instance family for low cost, high performance machine learning inference in the cloud. The Inf1 instances are powered by the AWS Inferentia chip, a custom-built hardware accelerator, specializing in deep learning inferencing workloads. [AWS Neuron](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/#) is the SDK for Inferentia that supports tracing and optimizing transformers models for deployment on Inf1. The Neuron SDK provides: 1. Easy-to-use API with one line of code change to trace and optimize a TorchScript model for inference in the cloud. 2. Out of the box performance optimizations for [improved cost-performance](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/benchmark/>). 3. 
Support for Hugging Face transformers models built with either [PyTorch](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/bert_tutorial/tutorial_pretrained_bert.html) or [TensorFlow](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/tensorflow/huggingface_bert/huggingface_bert.html). ### Implications Transformers models based on the [BERT (Bidirectional Encoder Representations from Transformers)](https://huggingface.co/docs/transformers/main/model_doc/bert) architecture, or its variants such as [distilBERT](https://huggingface.co/docs/transformers/main/model_doc/distilbert) and [roBERTa](https://huggingface.co/docs/transformers/main/model_doc/roberta) run best on Inf1 for non-generative tasks such as extractive question answering, sequence classification, and token classification. However, text generation tasks can still be adapted to run on Inf1 according to this [AWS Neuron MarianMT tutorial](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/transformers-marianmt.html). More information about models that can be converted out of the box on Inferentia can be found in the [Model Architecture Fit](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/models/models-inferentia.html#models-inferentia) section of the Neuron documentation. ### Dependencies Using AWS Neuron to convert models requires a [Neuron SDK environment](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/neuron-frameworks/pytorch-neuron/index.html#installation-guide) which comes preconfigured on [AWS Deep Learning AMI](https://docs.aws.amazon.com/dlami/latest/devguide/tutorial-inferentia-launching.html). ### Converting a model for AWS Neuron Convert a model for AWS NEURON using the same code from [Using TorchScript in Python](torchscript#using-torchscript-in-python) to trace a `BertModel`. Import the `torch.neuron` framework extension to access the components of the Neuron SDK through a Python API: ```python from transformers import BertModel, BertTokenizer, BertConfig import torch import torch.neuron ``` You only need to modify the following line: ```diff - torch.jit.trace(model, [tokens_tensor, segments_tensors]) + torch.neuron.trace(model, [tokens_tensor, segments_tensors]) ``` This enables the Neuron SDK to trace the model and optimize it for Inf1 instances. To learn more about AWS Neuron SDK features, tools, example tutorials and latest updates, please see the [AWS NeuronSDK documentation](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/index.html).
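Putting the pieces together, a minimal end-to-end sketch of tracing and saving a `BertModel` for Inf1 might look as follows; the input sentences and the output filename are arbitrary choices, and saving through `torch.jit.save` simply mirrors the earlier TorchScript example:

```python
import torch
import torch.neuron
from transformers import BertModel, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
inputs = tokenizer("Who was Jim Henson?", "Jim Henson was a puppeteer", return_tensors="pt")
tokens_tensor = inputs["input_ids"]
segments_tensors = inputs["token_type_ids"]

model = BertModel.from_pretrained("google-bert/bert-base-uncased", torchscript=True)
model.eval()

# torch.neuron.trace replaces torch.jit.trace and compiles the model for Inf1 instances.
neuron_model = torch.neuron.trace(model, [tokens_tensor, segments_tensors])
torch.jit.save(neuron_model, "neuron_bert.pt")
```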
transformers/docs/source/en/torchscript.md/0
{ "file_path": "transformers/docs/source/en/torchscript.md", "repo_id": "transformers", "token_count": 2742 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Debugging ## Debug de problemas de Network multi-GPU Cuando entrenas o infieres con `DistributedDataParallel` y varias GPUs, si encuentras problemas de intercomunicación entre procesos y/o nodos, puedes usar el siguiente script para diagnosticar problemas de red. ```bash wget https://raw.githubusercontent.com/huggingface/transformers/main/scripts/distributed/torch-distributed-gpu-test.py ``` Por ejemplo, para probar cómo interactúan 2 GPUs, haz lo siguiente: ```bash python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py ``` Si ambos procesos pueden hablar entre sí y asignar la memoria de la GPU, cada uno imprimirá un status OK. Para más GPUs o nodos, ajusta los argumentos en el script. Encontrarás muchos más detalles dentro del script de diagnóstico e incluso una receta de cómo ejecutarlo en un entorno SLURM. Un nivel adicional de debug es agregar la variable de entorno `NCCL_DEBUG=INFO` de la siguiente manera: ```bash NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py ``` Esto mostrará mucha información de debug relacionada con NCCL, que luego puedes buscar online si encuentras que reporta algún problema. O si no estás seguro de cómo interpretar el output, puedes compartir el archivo de log en un Issue. ## Detección de Underflow y Overflow <Tip> Esta función está disponible actualmente sólo para PyTorch. </Tip> <Tip> Para el entrenamiento multi-GPU, requiere DDP (`torch.distributed.launch`). </Tip> <Tip> Esta función puede utilizarse con cualquier modelo basado en `nn.Module`. </Tip> Si empiezas a obtener `loss=NaN` o el modelo muestra algún otro comportamiento anormal debido a `inf` o `nan` en activations o weights hay que descubrir dónde se produce el primer underflow o overflow y qué lo ha provocado. Por suerte puedes lograrlo fácilmente activando un módulo especial que hará la detección automáticamente. Si estás usando [`Trainer`], solo necesitas añadir: ```bash --debug underflow_overflow ``` a los argumentos normales de la línea de comandos, o pasar `debug="underflow_overflow"` al crear el objeto [`TrainingArguments`]. Si estás usando tu propio bucle de entrenamiento u otro Trainer puedes lograr lo mismo con: ```python from .debug_utils import DebugUnderflowOverflow debug_overflow = DebugUnderflowOverflow(model) ``` [`~debug_utils.DebugUnderflowOverflow`] inserta hooks en el modelo que inmediatamente después de cada forward testeará las variables de input y output y también los weights del módulo correspondiente. 
Tan pronto como se detecte `inf` o `nan` se detecta en al menos un elemento de las activations o weights, el programa afirmará e imprimirá un informe como este (esto fue capturado con `google/mt5-small` bajo fp16 mixed precision): ``` Detected inf/nan during batch_number=0 Last 21 forward frames: abs min abs max metadata encoder.block.1.layer.1.DenseReluDense.dropout Dropout 0.00e+00 2.57e+02 input[0] 0.00e+00 2.85e+02 output [...] encoder.block.2.layer.0 T5LayerSelfAttention 6.78e-04 3.15e+03 input[0] 2.65e-04 3.42e+03 output[0] None output[1] 2.25e-01 1.00e+04 output[2] encoder.block.2.layer.1.layer_norm T5LayerNorm 8.69e-02 4.18e-01 weight 2.65e-04 3.42e+03 input[0] 1.79e-06 4.65e+00 output encoder.block.2.layer.1.DenseReluDense.wi_0 Linear 2.17e-07 4.50e+00 weight 1.79e-06 4.65e+00 input[0] 2.68e-06 3.70e+01 output encoder.block.2.layer.1.DenseReluDense.wi_1 Linear 8.08e-07 2.66e+01 weight 1.79e-06 4.65e+00 input[0] 1.27e-04 2.37e+02 output encoder.block.2.layer.1.DenseReluDense.dropout Dropout 0.00e+00 8.76e+03 input[0] 0.00e+00 9.74e+03 output encoder.block.2.layer.1.DenseReluDense.wo Linear 1.01e-06 6.44e+00 weight 0.00e+00 9.74e+03 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense 1.79e-06 4.65e+00 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.dropout Dropout 3.18e-04 6.27e+04 input[0] 0.00e+00 inf output ``` El output del ejemplo se ha recortado en el centro por razones de brevedad. La segunda columna muestra el valor del elemento más grande en términos absolutos, por lo que si observas con detenimiento los últimos fotogramas, los inputs y outputs estaban en el rango de `1e4`. Así que cuando este entrenamiento se hizo con fp16 mixed precision, el último paso sufrió overflow (ya que bajo `fp16` el mayor número antes de `inf` es `64e3`). Para evitar overflows en `fp16` las activations deben permanecer muy por debajo de `1e4`, porque `1e4 * 1e4 = 1e8` por lo que cualquier matrix multiplication con grandes activations va a llevar a una condición de overflow numérico. Al principio del output puedes descubrir en qué número de batch se produjo el problema (aquí `Detected inf/nan during batch_number=0` significa que el problema se produjo en el primer batch). Cada frame del informe comienza declarando la entrada completamente calificada para el módulo correspondiente que este frame está reportando. Si nos fijamos sólo en este frame: ``` encoder.block.2.layer.1.layer_norm T5LayerNorm 8.69e-02 4.18e-01 weight 2.65e-04 3.42e+03 input[0] 1.79e-06 4.65e+00 output ``` Aquí, `encoder.block.2.layer.1.layer_norm` indica que era una layer norm para la primera capa, del segundo block del encoder. Y la call específica del `forward` es `T5LayerNorm`. Veamos los últimos frames de ese informe: ``` Detected inf/nan during batch_number=0 Last 21 forward frames: abs min abs max metadata [...] 
encoder.block.2.layer.1.DenseReluDense.wi_0 Linear 2.17e-07 4.50e+00 weight 1.79e-06 4.65e+00 input[0] 2.68e-06 3.70e+01 output encoder.block.2.layer.1.DenseReluDense.wi_1 Linear 8.08e-07 2.66e+01 weight 1.79e-06 4.65e+00 input[0] 1.27e-04 2.37e+02 output encoder.block.2.layer.1.DenseReluDense.wo Linear 1.01e-06 6.44e+00 weight 0.00e+00 9.74e+03 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense 1.79e-06 4.65e+00 input[0] 3.18e-04 6.27e+04 output encoder.block.2.layer.1.dropout Dropout 3.18e-04 6.27e+04 input[0] 0.00e+00 inf output ``` El último frame informa para la función `Dropout.forward` con la primera entrada para el único input y la segunda para el único output. Puedes ver que fue llamada desde un atributo `dropout` dentro de la clase `DenseReluDense`. Podemos ver que ocurrió durante la primera capa, del segundo block, durante el primer batch. Por último, el mayor absoluto elementos de input fue `6.27e+04` y el mismo para el output fue `inf`. Puedes ver aquí, que `T5DenseGatedGeluDense.forward` resultó en output activations, cuyo valor máximo absoluto fue alrededor de 62.7K, que está muy cerca del límite máximo de fp16 de 64K. En el siguiente frame tenemos `Dropout`, el cual renormaliza los weights, después de poner a cero algunos de los elementos, lo que empuja el valor máximo absoluto a más de 64K, y obtenemos un overflow (`inf`). Como puedes ver son los frames anteriores los que tenemos que mirar cuando los números empiezan a ser muy grandes para números fp16. Combinemos el informe con el código de `models/t5/modeling_t5.py`: ```python class T5DenseGatedGeluDense(nn.Module): def __init__(self, config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.gelu_act = ACT2FN["gelu_new"] def forward(self, hidden_states): hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states ``` Ahora es fácil ver la call `dropout`, y también todas las calls anteriores. Dado que la detección se produce en un forward hook, estos informes se imprimen inmediatamente después de que cada `forward` responda. Volviendo al informe completo, para actuar sobre él y arreglar el problema, tenemos que subir unos cuantos frames donde los números empezaron a subir y probablemente cambiar al modo `fp32` aquí, para que los números no sufran overflow cuando se multipliquen o al sumarlos. Por supuesto, puede haber otras soluciones. 
Por ejemplo, podríamos desactivar `amp` temporalmente si está activado, después de mover el original `forward` dentro de un helper wrapper, así: ```python def _forward(self, hidden_states): hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states import torch def forward(self, hidden_states): if torch.is_autocast_enabled(): with torch.cuda.amp.autocast(enabled=False): return self._forward(hidden_states) else: return self._forward(hidden_states) ``` Como el detector automático sólo informa de los inputs y outputs de los frames completos, una vez que sepas dónde buscar, puedes analizar también las etapas intermedias de una función específica de `forward`. En este caso, puede utilizar la función función de ayuda `detect_overflow` para inyectar el detector donde quieras, por ejemplo: ```python from debug_utils import detect_overflow class T5LayerFF(nn.Module): [...] def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) detect_overflow(forwarded_states, "after layer_norm") forwarded_states = self.DenseReluDense(forwarded_states) detect_overflow(forwarded_states, "after DenseReluDense") return hidden_states + self.dropout(forwarded_states) ``` Puedes ver que hemos añadido 2 de estos y ahora se trackea si `inf` o `nan` para `forwarded_states` fue detectado en algún punto intermedio. De hecho, el detector ya informa de esto porque cada una de las llamadas en el ejemplo anterior es un `nn.Module`, pero digamos que si tuvieras algunos cálculos directos locales, así es como lo harías. Además, si estás instanciando el debugger en tu propio código, puedes ajustar el número de frames impresos de su valor por defecto, por ejemplo: ```python from .debug_utils import DebugUnderflowOverflow debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100) ``` ### Rastreo de valores mínimos y máximos absolutos de batches específicos La misma clase de debugging se puede utilizar para el rastreo por batches con la función de detección de underflow/overflow desactivada. Digamos que quieres ver los valores mínimos y máximos absolutos de todos los ingredientes de cada call `forward` de un determinado batch, y sólo hacerlo para los batches 1 y 3. Entonces instancias esta clase como: ```python debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3]) ``` Y ahora los batches 1 y 3 completos serán rastreados usando el mismo formato que el detector de underflow/overflow. Los batches son 0-index. Esto es muy útil si sabes que el programa empieza a comportarse mal después de un determinado número de batch, para que puedas avanzar rápidamente hasta esa área. Aquí hay un ejemplo de output recortado para tal configuración: ``` *** Starting batch number=1 *** abs min abs max metadata shared Embedding 1.01e-06 7.92e+02 weight 0.00e+00 2.47e+04 input[0] 5.36e-05 7.92e+02 output [...] decoder.dropout Dropout 1.60e-07 2.27e+01 input[0] 0.00e+00 2.52e+01 output decoder T5Stack not a tensor output lm_head Linear 1.01e-06 7.92e+02 weight 0.00e+00 1.11e+00 input[0] 6.06e-02 8.39e+01 output T5ForConditionalGeneration not a tensor output *** Starting batch number=3 *** abs min abs max metadata shared Embedding 1.01e-06 7.92e+02 weight 0.00e+00 2.78e+04 input[0] 5.36e-05 7.92e+02 output [...] 
``` Aquí obtendrás un gran número de frames mostrados - tantos como forward calls haya en tu modelo, por lo que puede o no ser lo que quieras, pero a veces puede ser más fácil de usar para debug que un debugger normal. Por ejemplo, si un problema comienza a ocurrir en el batch 150. Entonces puedes mostrar las trazas de los batches 149 y 150 y comparar dónde los números empezaron a divergir. También puedes especificar el número de batch después del cual se debe detener el entrenamiento, con: ```python debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3) ```
transformers/docs/source/es/debugging.md/0
{ "file_path": "transformers/docs/source/es/debugging.md", "repo_id": "transformers", "token_count": 5532 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Tour rápido [[open-in-colab]] ¡Entra en marcha con los 🀗 Transformers! Comienza usando [`pipeline`] para una inferencia veloz, carga un modelo preentrenado y un tokenizador con una [AutoClass](./model_doc/auto) para resolver tu tarea de texto, visión o audio. <Tip> Todos los ejemplos de código presentados en la documentación tienen un botón arriba a la derecha para elegir si quieres ocultar o mostrar el código en Pytorch o TensorFlow. Si no fuese así, se espera que el código funcione para ambos backends sin ningún cambio. </Tip> ## Pipeline [`pipeline`] es la forma más fácil de usar un modelo preentrenado para una tarea dada. <Youtube id="tiZFewofSLM"/> El [`pipeline`] soporta muchas tareas comunes listas para usar: **Texto**: * Análisis de Sentimiento (Sentiment Analysis, en inglés): clasifica la polaridad de un texto dado. * Generación de Texto (Text Generation, en inglés): genera texto a partir de un input dado. * Reconocimiento de Entidades (Name Entity Recognition o NER, en inglés): etiqueta cada palabra con la entidad que representa (persona, fecha, ubicación, etc.). * Responder Preguntas (Question answering, en inglés): extrae la respuesta del contexto dado un contexto y una pregunta. * Rellenar Máscara (Fill-mask, en inglés): rellena el espacio faltante dado un texto con palabras enmascaradas. * Resumir (Summarization, en inglés): genera un resumen de una secuencia larga de texto o un documento. * Traducción (Translation, en inglés): traduce un texto a otro idioma. * Extracción de Características (Feature Extraction, en inglés): crea una representación tensorial del texto. **Imagen**: * Clasificación de Imágenes (Image Classification, en inglés): clasifica una imagen. * Segmentación de Imágenes (Image Segmentation, en inglés): clasifica cada pixel de una imagen. * Detección de Objetos (Object Detection, en inglés): detecta objetos dentro de una imagen. **Audio**: * Clasificación de Audios (Audio Classification, en inglés): asigna una etiqueta a un segmento de audio. * Reconocimiento de Voz Automático (Automatic Speech Recognition o ASR, en inglés): transcribe datos de audio a un texto. <Tip> Para más detalles acerca del [`pipeline`] y tareas asociadas, consulta la documentación [aquí](./main_classes/pipelines). </Tip> ### Uso del Pipeline En el siguiente ejemplo, usarás el [`pipeline`] para análisis de sentimiento. 
Instala las siguientes dependencias si aún no lo has hecho: <frameworkcontent> <pt> ```bash pip install torch ``` </pt> <tf> ```bash pip install tensorflow ``` </tf> </frameworkcontent> Importa [`pipeline`] y especifica la tarea que deseas completar: ```py >>> from transformers import pipeline >>> clasificador = pipeline("sentiment-analysis", model="pysentimiento/robertuito-sentiment-analysis") ``` El pipeline descarga y almacena en caché el [modelo preentrenado](https://huggingface.co/pysentimiento/robertuito-sentiment-analysis) y tokeniza para análisis de sentimiento. Si no hubieramos elegido un modelo el pipeline habría elegido uno por defecto. Ahora puedes usar `clasificador` en tu texto objetivo: ```py >>> clasificador("Estamos muy felices de mostrarte la biblioteca de 🀗 Transformers.") [{'label': 'POS', 'score': 0.9320}] ``` Para más de un enunciado, entrega una lista al [`pipeline`] que devolverá una lista de diccionarios: El [`pipeline`] también puede iterar sobre un dataset entero. Comienza instalando la biblioteca [🀗 Datasets](https://huggingface.co/docs/datasets/): ```bash pip install datasets ``` Crea un [`pipeline`] con la tarea que deseas resolver y el modelo que quieres usar. Coloca el parámetro `device` a `0` para poner los tensores en un dispositivo CUDA: ```py >>> import torch >>> from transformers import pipeline >>> reconocedor_de_voz = pipeline( ... "automatic-speech-recognition", model="jonatasgrosman/wav2vec2-large-xlsr-53-spanish", device=0 ... ) ``` A continuación, carga el dataset (ve 🀗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart.html) para más detalles) sobre el que quisieras iterar. Por ejemplo, vamos a cargar el dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14): ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="es-ES", split="train") # doctest: +IGNORE_RESULT ``` Debemos asegurarnos de que la frecuencia de muestreo del conjunto de datos coincide con la frecuencia de muestreo con la que se entrenó `jonatasgrosman/wav2vec2-large-xlsr-53-spanish`. ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=reconocedor_de_voz.feature_extractor.sampling_rate)) ``` Los archivos de audio se cargan y remuestrean automáticamente cuando llamamos a la columna `"audio"`. Extraigamos las matrices de onda cruda (raw waveform, en inglés) de las primeras 4 muestras y pasémosla como una lista al pipeline: ```py >>> resultado = reconocedor_de_voz(dataset[:4]["audio"]) >>> print([d["text"] for d in resultado]) ['ahora buenas eh a ver tengo un problema con vuestra aplicación resulta que que quiero hacer una transferencia bancaria a una cuenta conocida pero me da error la aplicación a ver que a ver que puede ser', 'la aplicación no cargue saldo de mi nueva cuenta', 'hola tengo un problema con la aplicación no carga y y tampoco veo que carga el saldo de mi cuenta nueva dice que la aplicación está siendo reparada y ahora no puedo acceder a mi cuenta no necesito inmediatamente', 'hora buena la aplicación no se carga la vida no carga el saldo de mi cuenta nueva dice que la villadenta siendo reparada y oro no puedo hacer a mi cuenta'] ``` Para un dataset más grande, donde los inputs son de mayor tamaño (como en habla/audio o visión), querrás pasar un generador en lugar de una lista que carga todos los inputs en memoria. Ve la [documentación del pipeline](./main_classes/pipelines) para más información. 
### Usa otro modelo y otro tokenizador en el pipeline El [`pipeline`] puede acomodarse a cualquier modelo del [Model Hub](https://huggingface.co/models) haciendo más fácil adaptar el [`pipeline`] para otros casos de uso. Por ejemplo, si quisieras un modelo capaz de manejar texto en francés, usa los tags en el Model Hub para filtrar entre los modelos apropiados. El resultado mejor filtrado devuelve un [modelo BERT](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) multilingual fine-tuned para el análisis de sentimiento. Genial, ¡vamos a usar este modelo! ```py >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" ``` <frameworkcontent> <pt> Usa [`AutoModelForSequenceClassification`] y ['AutoTokenizer'] para cargar un modelo preentrenado y un tokenizador asociado (más en un `AutoClass` debajo): ```py >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </pt> <tf> Usa [`TFAutoModelForSequenceClassification`] y ['AutoTokenizer'] para cargar un modelo preentrenado y un tokenizador asociado (más en un `TFAutoClass` debajo): ```py >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </tf> </frameworkcontent> Después puedes especificar el modelo y el tokenizador en el [`pipeline`], y aplicar el `classifier` en tu texto objetivo: ```py >>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) >>> classifier("Nous sommes trÚs heureux de vous présenter la bibliothÚque 🀗 Transformers.") [{'label': '5 stars', 'score': 0.7273}] ``` Si no pudieras encontrar el modelo para tu caso respectivo de uso necesitarás ajustar un modelo preentrenado a tus datos. Mira nuestro [tutorial de fine-tuning](./training) para aprender cómo. Finalmente, después de que has ajustado tu modelo preentrenado, ¡por favor considera compartirlo (ve el tutorial [aquí](./model_sharing)) con la comunidad en el Model Hub para democratizar el NLP! 🀗 ## AutoClass <Youtube id="AhChOFRegn4"/> Por debajo, las clases [`AutoModelForSequenceClassification`] y [`AutoTokenizer`] trabajan juntas para dar poder al [`pipeline`]. Una [AutoClass](./model_doc/auto) es un atajo que automáticamente recupera la arquitectura de un modelo preentrenado con su nombre o el path. Sólo necesitarás seleccionar el `AutoClass` apropiado para tu tarea y tu tokenizador asociado con [`AutoTokenizer`]. Regresemos a nuestro ejemplo y veamos cómo puedes usar el `AutoClass` para reproducir los resultados del [`pipeline`]. ### AutoTokenizer Un tokenizador es responsable de procesar el texto a un formato que sea entendible para el modelo. Primero, el tokenizador separará el texto en palabras llamadas *tokens*. Hay múltiples reglas que gobiernan el proceso de tokenización incluyendo el cómo separar una palabra y en qué nivel (aprende más sobre tokenización [aquí](./tokenizer_summary)). Lo más importante es recordar que necesitarás instanciar el tokenizador con el mismo nombre del modelo para asegurar que estás usando las mismas reglas de tokenización con las que el modelo fue preentrenado. 
Carga un tokenizador con [`AutoTokenizer`]: ```py >>> from transformers import AutoTokenizer >>> nombre_del_modelo = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tokenizer = AutoTokenizer.from_pretrained(nombre_del_modelo) ``` Después, el tokenizador convierte los tokens a números para construir un tensor que servirá como input para el modelo. Esto es conocido como el *vocabulario* del modelo. Pasa tu texto al tokenizador: ```py >>> encoding = tokenizer("Estamos muy felices de mostrarte la biblioteca de 🀗 Transformers.") >>> print(encoding) {'input_ids': [101, 10602, 14000, 13653, 43353, 10107, 10102, 47201, 10218, 10106, 18283, 10102, 100, 58263, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` El tokenizador devolverá un diccionario conteniendo: * [input_ids](./glossary#input-ids): representaciones numéricas de los tokens. * [atttention_mask](.glossary#attention-mask): indica cuáles tokens deben ser atendidos. Como con el [`pipeline`], el tokenizador aceptará una lista de inputs. Además, el tokenizador también puede rellenar (pad, en inglés) y truncar el texto para devolver un lote (batch, en inglés) de longitud uniforme: <frameworkcontent> <pt> ```py >>> pt_batch = tokenizer( ... ["We are very happy to show you the 🀗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="pt", ... ) ``` </pt> <tf> ```py >>> tf_batch = tokenizer( ... ["We are very happy to show you the 🀗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="tf", ... ) ``` </tf> </frameworkcontent> Lee el tutorial de [preprocessing](./preprocessing) para más detalles acerca de la tokenización. ### AutoModel <frameworkcontent> <pt> 🀗 Transformers provee una forma simple y unificada de cargar tus instancias preentrenadas. Esto significa que puedes cargar un [`AutoModel`] como cargarías un [`AutoTokenizer`]. La única diferencia es seleccionar el [`AutoModel`] correcto para la tarea. Ya que estás clasificando texto, o secuencias, carga [`AutoModelForSequenceClassification`]: ```py >>> from transformers import AutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> Ve el [task summary](./task_summary) para revisar qué clase del [`AutoModel`] deberías usar para cada tarea. </Tip> Ahora puedes pasar tu lote (batch) preprocesado de inputs directamente al modelo. Solo tienes que desempacar el diccionario añadiendo `**`: ```py >>> pt_outputs = pt_model(**pt_batch) ``` El modelo producirá las activaciones finales en el atributo `logits`. Aplica la función softmax a `logits` para obtener las probabilidades: ```py >>> from torch import nn >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) >>> print(pt_predictions) tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>) ``` </pt> <tf> 🀗 Transformers provee una forma simple y unificada de cargar tus instancias preentrenadas. Esto significa que puedes cargar un [`TFAutoModel`] como cargarías un [`AutoTokenizer`]. La única diferencia es seleccionar el [`TFAutoModel`] correcto para la tarea. 
Ya que estás clasificando texto, o secuencias, carga [`TFAutoModelForSequenceClassification`]: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> Ve el [task summary](./task_summary) para revisar qué clase del [`AutoModel`] deberías usar para cada tarea. </Tip> Ahora puedes pasar tu lote preprocesado de inputs directamente al modelo pasando las llaves del diccionario directamente a los tensores: ```py >>> tf_outputs = tf_model(tf_batch) ``` El modelo producirá las activaciones finales en el atributo `logits`. Aplica la función softmax a `logits` para obtener las probabilidades: ```py >>> import tensorflow as tf >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) >>> print(tf.math.round(tf_predictions * 10**4) / 10**4) tf.Tensor( [[0.0021 0.0018 0.0116 0.2121 0.7725] [0.2084 0.1826 0.1969 0.1755 0.2365]], shape=(2, 5), dtype=float32) ``` </tf> </frameworkcontent> <Tip> Todos los modelos de 🀗 Transformers (PyTorch o TensorFlow) producirán los tensores *antes* de la función de activación final (como softmax) porque la función de activación final es comúnmente fusionada con la pérdida. </Tip> Los modelos son [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) o [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) estándares así que podrás usarlos en tu training loop usual. Sin embargo, para facilitar las cosas, 🀗 Transformers provee una clase [`Trainer`] para PyTorch que añade funcionalidades para entrenamiento distribuido, precición mixta, y más. Para TensorFlow, puedes usar el método `fit` desde [Keras](https://keras.io/). Consulta el [tutorial de entrenamiento](./training) para más detalles. <Tip> Los outputs del modelo de 🀗 Transformers son dataclasses especiales por lo que sus atributos pueden ser completados en un IDE. Los outputs del modelo también se comportan como tuplas o diccionarios (e.g., puedes indexar con un entero, un slice o una cadena) en cuyo caso los atributos que son `None` son ignorados. </Tip> ### Guarda un modelo <frameworkcontent> <pt> Una vez que se haya hecho fine-tuning a tu modelo puedes guardarlo con tu tokenizador usando [`PreTrainedModel.save_pretrained`]: ```py >>> pt_save_directory = "./pt_save_pretrained" >>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT >>> pt_model.save_pretrained(pt_save_directory) ``` Cuando quieras usar el modelo otra vez cárgalo con [`PreTrainedModel.from_pretrained`]: ```py >>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained") ``` </pt> <tf> Una vez que se haya hecho fine-tuning a tu modelo puedes guardarlo con tu tokenizador usando [`TFPreTrainedModel.save_pretrained`]: ```py >>> tf_save_directory = "./tf_save_pretrained" >>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT >>> tf_model.save_pretrained(tf_save_directory) ``` Cuando quieras usar el modelo otra vez cárgalo con [`TFPreTrainedModel.from_pretrained`]: ```py >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained") ``` </tf> </frameworkcontent> Una característica particularmente interesante de 🀗 Transformers es la habilidad de guardar el modelo y cargarlo como un modelo de PyTorch o TensorFlow. 
El parámetro `from_pt` o `from_tf` puede convertir el modelo de un framework al otro:

<frameworkcontent>
<pt>

```py
>>> from transformers import AutoModel

>>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory)
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True)
```
</pt>
<tf>

```py
>>> from transformers import TFAutoModel

>>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory)
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)
```
</tf>
</frameworkcontent>
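A modo de referencia, este es un esquema mínimo que junta los pasos anteriores (tokenización, modelo y softmax) en un solo bloque de inferencia con PyTorch. Es solo un boceto ilustrativo: asume que tienes PyTorch instalado, que puedes descargar el checkpoint `nlptown/bert-base-multilingual-uncased-sentiment`, y que el mapeo de etiquetas proviene de `config.id2label` del propio modelo.

```py
>>> import torch
>>> from transformers import AutoModelForSequenceClassification, AutoTokenizer

>>> nombre_del_modelo = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tokenizer = AutoTokenizer.from_pretrained(nombre_del_modelo)
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(nombre_del_modelo)

>>> pt_batch = tokenizer(
...     ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."],
...     padding=True,
...     truncation=True,
...     return_tensors="pt",
... )

>>> with torch.no_grad():  # no se necesitan gradientes para la inferencia
...     pt_outputs = pt_model(**pt_batch)

>>> probabilidades = torch.nn.functional.softmax(pt_outputs.logits, dim=-1)
>>> etiquetas = [pt_model.config.id2label[int(i)] for i in probabilidades.argmax(dim=-1)]
>>> print(etiquetas)  # p. ej. ['5 stars', '3 stars']
```

El resultado debería coincidir con el que obtuviste antes con [`pipeline`]; la diferencia es que aquí controlas cada paso por separado.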
transformers/docs/source/es/quicktour.md/0
{ "file_path": "transformers/docs/source/es/quicktour.md", "repo_id": "transformers", "token_count": 6360 }
# docstyle-ignore INSTALL_CONTENT = """ # Installazione di Transformers ! pip install transformers datasets evaluate accelerate # Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e # rimuovi la modalità commento al comando seguente. # ! pip install git+https://github.com/huggingface/transformers.git """ notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}] black_avoid_patterns = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
transformers/docs/source/it/_config.py/0
{ "file_path": "transformers/docs/source/it/_config.py", "repo_id": "transformers", "token_count": 190 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Modelli multilingue per l'inferenza

[[open-in-colab]]

Ci sono diversi modelli multilingue in 🤗 Transformers, e il loro utilizzo per l'inferenza differisce da quello dei modelli monolingua. Non *tutti* gli utilizzi dei modelli multilingue sono però diversi. Alcuni modelli, come [google-bert/bert-base-multilingual-uncased](https://huggingface.co/google-bert/bert-base-multilingual-uncased), possono essere usati come un modello monolingua. Questa guida ti mostrerà come utilizzare modelli multilingue che utilizzano un modo diverso per fare l'inferenza.

## XLM

XLM ha dieci diversi checkpoint, di cui solo uno è monolingua. I nove checkpoint rimanenti possono essere suddivisi in due categorie: i checkpoint che utilizzano i language embeddings e quelli che non li utilizzano.

### XLM con language embeddings

I seguenti modelli XLM utilizzano gli embeddings linguistici per specificare la lingua utilizzata per l'inferenza:

- `FacebookAI/xlm-mlm-ende-1024` (Modellazione mascherata del linguaggio (Masked language modeling, in inglese), Inglese-Tedesco)
- `FacebookAI/xlm-mlm-enfr-1024` (Modellazione mascherata del linguaggio, Inglese-Francese)
- `FacebookAI/xlm-mlm-enro-1024` (Modellazione mascherata del linguaggio, Inglese-Rumeno)
- `FacebookAI/xlm-mlm-xnli15-1024` (Modellazione mascherata del linguaggio, lingue XNLI)
- `FacebookAI/xlm-mlm-tlm-xnli15-1024` (Modellazione mascherata del linguaggio + traduzione, lingue XNLI)
- `FacebookAI/xlm-clm-enfr-1024` (Modellazione causale del linguaggio, Inglese-Francese)
- `FacebookAI/xlm-clm-ende-1024` (Modellazione causale del linguaggio, Inglese-Tedesco)

Gli embeddings linguistici sono rappresentati come un tensore delle stesse dimensioni dell'`input_ids` passato al modello. I valori in questi tensori dipendono dal linguaggio usato e sono identificati dagli attributi `lang2id` e `id2lang` del tokenizer.

In questo esempio, carica il checkpoint `FacebookAI/xlm-clm-enfr-1024` (Modellazione causale del linguaggio, Inglese-Francese):

```py
>>> import torch
>>> from transformers import XLMTokenizer, XLMWithLMHeadModel

>>> tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-clm-enfr-1024")
>>> model = XLMWithLMHeadModel.from_pretrained("FacebookAI/xlm-clm-enfr-1024")
```

L'attributo `lang2id` del tokenizer mostra le lingue di questo modello e i loro id:

```py
>>> print(tokenizer.lang2id)
{'en': 0, 'fr': 1}
```

Poi, crea un esempio di input:

```py
>>> input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")])  # batch size of 1
```

Imposta l'id del linguaggio a `"en"` e usalo per definire il language embedding. Il language embedding è un tensore riempito con `0` perché questo è il language id per l'inglese. Questo tensore dovrebbe avere la stessa dimensione di `input_ids`.
```py
>>> language_id = tokenizer.lang2id["en"]  # 0
>>> langs = torch.tensor([language_id] * input_ids.shape[1])  # torch.tensor([0, 0, 0, ..., 0])

>>> # We reshape it to be of size (batch_size, sequence_length)
>>> langs = langs.view(1, -1)  # is now of shape [1, sequence_length] (we have a batch size of 1)
```

Adesso puoi inserire `input_ids` e language embedding nel modello:

```py
>>> outputs = model(input_ids, langs=langs)
```

Lo script [run_generation.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-generation/run_generation.py) può generare testo tramite i language embeddings usando i checkpoints `xlm-clm`.

### XLM senza language embeddings

I seguenti modelli XLM non richiedono l'utilizzo dei language embeddings per fare inferenza:

- `FacebookAI/xlm-mlm-17-1280` (Modellazione mascherata del linguaggio, 17 lingue)
- `FacebookAI/xlm-mlm-100-1280` (Modellazione mascherata del linguaggio, 100 lingue)

Questi modelli sono utilizzati per rappresentazioni generiche di frasi, a differenza dei precedenti checkpoint XLM.

## BERT

Il seguente modello BERT può essere usato per compiti multilingue:

- `google-bert/bert-base-multilingual-uncased` (Modellazione mascherata del linguaggio + Previsione della prossima frase, 102 lingue)
- `google-bert/bert-base-multilingual-cased` (Modellazione mascherata del linguaggio + Previsione della prossima frase, 104 lingue)

Questi modelli non richiedono language embeddings per fare inferenza. Riescono ad identificare il linguaggio dal contesto e inferire di conseguenza.

## XLM-RoBERTa

Il seguente modello XLM-RoBERTa può essere usato per compiti multilingue:

- `FacebookAI/xlm-roberta-base` (Modellazione mascherata del linguaggio, 100 lingue)
- `FacebookAI/xlm-roberta-large` (Modellazione mascherata del linguaggio, 100 lingue)

XLM-RoBERTa è stato addestrato su 2.5TB di dati CommonCrawl appena creati e puliti in 100 lingue. Offre notevoli vantaggi rispetto ai modelli multilingue rilasciati in precedenza, come mBERT o XLM, in compiti come la classificazione, l'etichettatura delle sequenze e la risposta alle domande.

## M2M100

Il seguente modello M2M100 può essere usato per compiti multilingue:

- `facebook/m2m100_418M` (Traduzione)
- `facebook/m2m100_1.2B` (Traduzione)

In questo esempio, carica il checkpoint `facebook/m2m100_418M` per tradurre dal cinese all'inglese. Puoi impostare la lingua di partenza nel tokenizer:

```py
>>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

>>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger."
>>> chinese_text = "不要插手巫師的事務, 因為他們是微妙的, 很快就會發怒."

>>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="zh")
>>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
```

Applica il tokenizer al testo:

```py
>>> encoded_zh = tokenizer(chinese_text, return_tensors="pt")
```

M2M100 forza l'id della lingua obiettivo come primo token generato per tradurre nella lingua obiettivo. Imposta il parametro `forced_bos_token_id` a `en` nel metodo `generate` per tradurre in inglese:

```py
>>> generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en"))
>>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
'Do not interfere with the matters of the witches, because they are delicate and will soon be angry.'
```

## MBart

Il seguente modello MBart può essere usato per compiti multilingue:

- `facebook/mbart-large-50-one-to-many-mmt` (Traduzione automatica multilingue uno-a-molti, 50 lingue)
- `facebook/mbart-large-50-many-to-many-mmt` (Traduzione automatica multilingue molti-a-molti, 50 lingue)
- `facebook/mbart-large-50-many-to-one-mmt` (Traduzione automatica multilingue molti-a-uno, 50 lingue)
- `facebook/mbart-large-50` (Traduzione multilingue, 50 lingue)
- `facebook/mbart-large-cc25`

In questo esempio, carica il checkpoint `facebook/mbart-large-50-many-to-many-mmt` per tradurre dal finlandese all'inglese. Puoi impostare la lingua di partenza nel tokenizer:

```py
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

>>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger."
>>> fi_text = "Älä sekaannu velhojen asioihin, sillä ne ovat hienovaraisia ja nopeasti vihaisia."

>>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-many-mmt", src_lang="fi_FI")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
```

Applica il tokenizer sul testo finlandese:

```py
>>> encoded_fi = tokenizer(fi_text, return_tensors="pt")
```

MBart forza l'id della lingua obiettivo come primo token generato per tradurre nella lingua obiettivo. Imposta il parametro `forced_bos_token_id` a `en` nel metodo `generate` per tradurre in inglese:

```py
>>> generated_tokens = model.generate(**encoded_fi, forced_bos_token_id=tokenizer.lang_code_to_id("en_XX"))
>>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
"Don't interfere with the wizard's affairs, because they are subtle, will soon get angry."
```

Se stai usando il checkpoint `facebook/mbart-large-50-many-to-one-mmt`, non hai bisogno di forzare l'id della lingua obiettivo come primo token generato; altrimenti l'uso è lo stesso.
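Per comodità, ecco uno schizzo minimale che racchiude il flusso appena descritto (tokenizer → `generate` con `forced_bos_token_id` → decodifica) in una piccola funzione riutilizzabile basata su M2M100. È solo un esempio indicativo: il nome della funzione `traduci` e le lingue scelte sono ipotetici, e si assume di poter scaricare il checkpoint `facebook/m2m100_418M`.

```py
>>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

>>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
>>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

>>> def traduci(testo, lingua_sorgente, lingua_destinazione):
...     # imposta la lingua di partenza prima di tokenizzare
...     tokenizer.src_lang = lingua_sorgente
...     encoded = tokenizer(testo, return_tensors="pt")
...     # forza l'id della lingua obiettivo come primo token generato
...     generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id(lingua_destinazione))
...     return tokenizer.batch_decode(generated, skip_special_tokens=True)[0]

>>> print(traduci("La vita è bella.", "it", "en"))
```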
transformers/docs/source/it/multilingual.md/0
{ "file_path": "transformers/docs/source/it/multilingual.md", "repo_id": "transformers", "token_count": 3202 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Hyperparameter Search using Trainer API 🀗 Transformersは、🀗 Transformersモデルのトレヌニングを最適化する[`Trainer`]クラスを提䟛し、独自のトレヌニングルヌプを手動で蚘述せずにトレヌニングを開始するのが簡単になりたす。[`Trainer`]はハむパヌパラメヌタヌ怜玢のAPIも提䟛しおいたす。このドキュメントでは、それを䟋瀺したす。 ## Hyperparameter Search backend [`Trainer`]は珟圚、4぀のハむパヌパラメヌタヌ怜玢バック゚ンドをサポヌトしおいたす [optuna](https://optuna.org/)、[sigopt](https://sigopt.com/)、[raytune](https://docs.ray.io/en/latest/tune/index.html)、および[wandb](https://wandb.ai/site/sweeps)。 これらを䜿甚する前に、ハむパヌパラメヌタヌ怜玢バック゚ンドをむンストヌルする必芁がありたす。 ```bash pip install optuna/sigopt/wandb/ray[tune] ``` ## How to enable Hyperparameter search in example ハむパヌパラメヌタの怜玢スペヌスを定矩し、異なるバック゚ンドには異なるフォヌマットが必芁です。 Sigoptの堎合、sigopt [object_parameter](https://docs.sigopt.com/ai-module-api-references/api_reference/objects/object_parameter) を参照しおください。それは以䞋のようなものです ```py >>> def sigopt_hp_space(trial): ... return [ ... {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double"}, ... { ... "categorical_values": ["16", "32", "64", "128"], ... "name": "per_device_train_batch_size", ... "type": "categorical", ... }, ... ] ``` Optunaに関しおは、[object_parameter](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/002_configurations.html#sphx-glr-tutorial-10-key-features-002-configurations-py)をご芧ください。以䞋のようになりたす ```py >>> def optuna_hp_space(trial): ... return { ... "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True), ... "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [16, 32, 64, 128]), ... } ``` Optunaは、倚目的のハむパヌパラメヌタ最適化HPOを提䟛しおいたす。 `hyperparameter_search` で `direction` を枡し、耇数の目的関数倀を返すための独自の `compute_objective` を定矩するこずができたす。 Pareto Front`List[BestRun]`は `hyperparameter_search` で返され、[test_trainer](https://github.com/huggingface/transformers/blob/main/tests/trainer/test_trainer.py) のテストケヌス `TrainerHyperParameterMultiObjectOptunaIntegrationTest` を参照する必芁がありたす。これは以䞋のようになりたす。 ```py >>> best_trials = trainer.hyperparameter_search( ... direction=["minimize", "maximize"], ... backend="optuna", ... hp_space=optuna_hp_space, ... n_trials=20, ... compute_objective=compute_objective, ... ) ``` Ray Tuneに関しお、[object_parameter](https://docs.ray.io/en/latest/tune/api/search_space.html)を参照しおください。以䞋のようになりたす ```py >>> def ray_hp_space(trial): ... return { ... "learning_rate": tune.loguniform(1e-6, 1e-4), ... "per_device_train_batch_size": tune.choice([16, 32, 64, 128]), ... } ``` Wandbに぀いおは、[object_parameter](https://docs.wandb.ai/guides/sweeps/configuration)をご芧ください。これは以䞋のようになりたす ```py >>> def wandb_hp_space(trial): ... return { ... "method": "random", ... "metric": {"name": "objective", "goal": "minimize"}, ... "parameters": { ... "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4}, ... "per_device_train_batch_size": {"values": [16, 32, 64, 128]}, ... }, ... 
} ``` `model_init` 関数を定矩し、それを [`Trainer`] に枡す䟋を瀺したす ```py >>> def model_init(trial): ... return AutoModelForSequenceClassification.from_pretrained( ... model_args.model_name_or_path, ... from_tf=bool(".ckpt" in model_args.model_name_or_path), ... config=config, ... cache_dir=model_args.cache_dir, ... revision=model_args.model_revision, ... token=True if model_args.use_auth_token else None, ... ) ``` [`Trainer`] を `model_init` 関数、トレヌニング匕数、トレヌニングデヌタセット、テストデヌタセット、および評䟡関数ず共に䜜成しおください: ```py >>> trainer = Trainer( ... model=None, ... args=training_args, ... train_dataset=small_train_dataset, ... eval_dataset=small_eval_dataset, ... compute_metrics=compute_metrics, ... processing_class=tokenizer, ... model_init=model_init, ... data_collator=data_collator, ... ) ``` ハむパヌパラメヌタヌの探玢を呌び出し、最良のトラむアル パラメヌタヌを取埗したす。バック゚ンドは `"optuna"` / `"sigopt"` / `"wandb"` / `"ray"` である可胜性がありたす。方向は `"minimize"` たたは `"maximize"` であり、目暙をより倧きくするか小さくするかを瀺したす。 `compute_objective` 関数を独自に定矩するこずもできたす。定矩されおいない堎合、デフォルトの `compute_objective` が呌び出され、F1などの評䟡メトリックの合蚈が目暙倀ずしお返されたす。 ```py >>> best_trial = trainer.hyperparameter_search( ... direction="maximize", ... backend="optuna", ... hp_space=optuna_hp_space, ... n_trials=20, ... compute_objective=compute_objective, ... ) ``` ## Hyperparameter search For DDP finetune 珟圚、DDPDistributed Data Parallelのためのハむパヌパラメヌタヌ怜玢は、Optuna ず SigOpt に察しお有効になっおいたす。ランクれロプロセスのみが怜玢トラむアルを生成し、他のランクに匕数を枡したす。
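参考として、独自の `compute_objective` を定義する最小限のスケッチを示します。これはあくまで例示であり、検証損失 `eval_loss` を最小化するという仮定に基づきます。また、上記の例で定義した `trainer` と `optuna_hp_space` が利用可能であることを前提としています。

```py
>>> def compute_objective(metrics):
...     # 評価で得られるメトリクス辞書から目的値を取り出す（ここでは検証損失を最小化する例）
...     return metrics["eval_loss"]

>>> best_trial = trainer.hyperparameter_search(
...     direction="minimize",
...     backend="optuna",
...     hp_space=optuna_hp_space,
...     n_trials=20,
...     compute_objective=compute_objective,
... )
>>> print(best_trial.hyperparameters)  # 最良トライアルのハイパーパラメーター
```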
transformers/docs/source/ja/hpo_train.md/0
{ "file_path": "transformers/docs/source/ja/hpo_train.md", "repo_id": "transformers", "token_count": 2838 }
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ALBERT <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=albert"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-albert-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/albert-base-v2"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## 抂芁 ALBERTモデルは、「[ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942)」ずいう論文でZhenzhong Lan、Mingda Chen、Sebastian Goodman、Kevin Gimpel、Piyush Sharma、Radu Soricutによっお提案されたした。BERTのメモリ消費を枛らしトレヌニングを高速化するためのパラメヌタ削枛技術を2぀瀺しおいたす - 埋め蟌み行列を2぀の小さな行列に分割する。 - グルヌプ間で分割された繰り返し局を䜿甚する。 論文の芁旚は以䞋の通りです *自然蚀語衚珟の事前孊習時にモデルのサむズを増やすず、䞋流タスクのパフォヌマンスが向䞊するこずがしばしばありたす。しかし、ある時点でさらなるモデルの増倧は、GPU/TPUのメモリ制限、長い蚓緎時間、予期せぬモデルの劣化ずいった問題のために困難になりたす。これらの問題に察凊するために、我々はBERTのメモリ消費を䜎枛し、蚓緎速床を高めるための2぀のパラメヌタ削枛技術を提案したす。包括的な実蚌的蚌拠は、我々の提案方法が元のBERTに比べおはるかによくスケヌルするモデルを生み出すこずを瀺しおいたす。たた、文間の䞀貫性をモデリングに焊点を圓おた自己教垫あり損倱を䜿甚し、耇数の文が含たれる䞋流タスクに䞀貫しお助けずなるこずを瀺したす。その結果、我々の最良のモデルは、BERT-largeに比べおパラメヌタが少ないにもかかわらず、GLUE、RACE、SQuADベンチマヌクで新たな最先端の結果を確立したす。* このモデルは[lysandre](https://huggingface.co/lysandre)により提䟛されたした。このモデルのjaxバヌゞョンは[kamalkraj](https://huggingface.co/kamalkraj)により提䟛されたした。オリゞナルのコヌドは[こちら](https://github.com/google-research/ALBERT)で芋るこずができたす。 ## 䜿甚䞊のヒント - ALBERTは絶察䜍眮埋め蟌みを䜿甚するモデルなので、通垞、入力を巊偎ではなく右偎にパディングするこずが掚奚されたす。 - ALBERTは繰り返し局を䜿甚するためメモリ䜿甚量は小さくなりたすが、同じ数の繰り返し局を反埩しなければならないため、隠れ局の数が同じであればBERTのようなアヌキテクチャず同様の蚈算コストがかかりたす。 - 埋め蟌みサむズEは隠れサむズHず異なりたすが、これは埋め蟌みが文脈に䟝存しない䞀぀の埋め蟌みベクトルが䞀぀のトヌクンを衚すのに察し、隠れ状態は文脈に䟝存する1぀の隠れ状態がトヌクン系列を衚すため、H >> Eずするこずがより論理的です。たた、埋め蟌み行列のサむズはV x Eず倧きいですVは語圙サむズ。E < Hであれば、パラメヌタは少なくなりたす。 - 局はパラメヌタを共有するグルヌプに分割されおいたすメモリ節玄のため。次文予枬NSP: Next Sentence Predictionは文の順序予枬に眮き換えられたす入力では、2぀の文AずBそれらは連続しおいるがあり、Aに続いおBを䞎えるか、Bに続いおAを䞎えたす。モデルはそれらが入れ替わっおいるかどうかを予枬する必芁がありたす。 ## 参考資料 - [テキスト分類タスクガむド](../tasks/sequence_classification) - [トヌクン分類タスクガむド](../tasks/token_classification) - [質問応答タスクガむド](../tasks/question_answering) - [マスクされた蚀語モデルタスクガむド](../tasks/masked_language_modeling) - [倚肢遞択タスクガむド](../tasks/multiple_choice) ## AlbertConfig [[autodoc]] AlbertConfig ## AlbertTokenizer [[autodoc]] AlbertTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## AlbertTokenizerFast [[autodoc]] AlbertTokenizerFast ## Albert specific outputs [[autodoc]] models.albert.modeling_albert.AlbertForPreTrainingOutput [[autodoc]] models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput <frameworkcontent> <pt> ## AlbertModel [[autodoc]] AlbertModel - forward ## AlbertForPreTraining [[autodoc]] AlbertForPreTraining - forward ## AlbertForMaskedLM [[autodoc]] AlbertForMaskedLM - forward ## 
AlbertForSequenceClassification [[autodoc]] AlbertForSequenceClassification - forward ## AlbertForMultipleChoice [[autodoc]] AlbertForMultipleChoice - forward ## AlbertForTokenClassification [[autodoc]] AlbertForTokenClassification - forward ## AlbertForQuestionAnswering [[autodoc]] AlbertForQuestionAnswering - forward </pt> <tf> ## TFAlbertModel [[autodoc]] TFAlbertModel - call ## TFAlbertForPreTraining [[autodoc]] TFAlbertForPreTraining - call ## TFAlbertForMaskedLM [[autodoc]] TFAlbertForMaskedLM - call ## TFAlbertForSequenceClassification [[autodoc]] TFAlbertForSequenceClassification - call ## TFAlbertForMultipleChoice [[autodoc]] TFAlbertForMultipleChoice - call ## TFAlbertForTokenClassification [[autodoc]] TFAlbertForTokenClassification - call ## TFAlbertForQuestionAnswering [[autodoc]] TFAlbertForQuestionAnswering - call </tf> <jax> ## FlaxAlbertModel [[autodoc]] FlaxAlbertModel - __call__ ## FlaxAlbertForPreTraining [[autodoc]] FlaxAlbertForPreTraining - __call__ ## FlaxAlbertForMaskedLM [[autodoc]] FlaxAlbertForMaskedLM - __call__ ## FlaxAlbertForSequenceClassification [[autodoc]] FlaxAlbertForSequenceClassification - __call__ ## FlaxAlbertForMultipleChoice [[autodoc]] FlaxAlbertForMultipleChoice - __call__ ## FlaxAlbertForTokenClassification [[autodoc]] FlaxAlbertForTokenClassification - __call__ ## FlaxAlbertForQuestionAnswering [[autodoc]] FlaxAlbertForQuestionAnswering - __call__ </jax> </frameworkcontent>
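参考までに、ALBERT のチェックポイントを読み込んで最後の隠れ状態を取得する最小限のスケッチを示します。これは仮定に基づく例であり、チェックポイント名 `albert/albert-base-v2` は一例です（上記の使用上のヒントに従い、パディングは右側のデフォルトのままにしています）。

```py
>>> import torch
>>> from transformers import AlbertModel, AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
>>> model = AlbertModel.from_pretrained("albert/albert-base-v2")

>>> # ALBERT は絶対位置埋め込みを使うため、右側パディング（デフォルト）を利用する
>>> inputs = tokenizer(["Hello, my dog is cute", "Hi"], padding=True, return_tensors="pt")
>>> with torch.no_grad():
...     outputs = model(**inputs)

>>> print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
```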
transformers/docs/source/ja/model_doc/albert.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/albert.md", "repo_id": "transformers", "token_count": 2960 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BigBirdPegasus ## Overview BigBird モデルは、[Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) で提案されたした。 ザヒヌル、マンゞルずグルガネシュ、グルずダベむ、クマヌル・アノィナノァず゚むンズリヌ、ゞョシュアずアルベルティ、クリスずオンタノン、 サンティアゎずファム、フィリップずラブラ、アニルヌドずワン、キヌファンずダン、リヌなど。 BigBird は泚目床が䜎い BERT などの Transformer ベヌスのモデルをさらに長いシヌケンスに拡匵する、Transformer ベヌスのモデル。たばらに加えお アテンションず同様に、BigBird は入力シヌケンスにランダム アテンションだけでなくグロヌバル アテンションも適甚したす。理論的には、 たばらで党䜓的でランダムな泚意を適甚するず、完党な泚意に近づくこずが瀺されおいたすが、 長いシヌケンスでは蚈算効率が倧幅に向䞊したす。より長いコンテキストを凊理できる機胜の結果ずしお、 BigBird は、質問応答や BERT たたは RoBERTa ず比范した芁玄。 論文の芁玄は次のずおりです。 *BERT などのトランスフォヌマヌベヌスのモデルは、NLP で最も成功した深局孊習モデルの 1 ぀です。 残念ながら、それらの䞭栞的な制限の 1 ぀は、シヌケンスに察する二次䟝存性 (䞻にメモリに関する) です。 完党な泚意メカニズムによる長さです。これを解決するために、BigBird は、たばらな泚意メカニズムを提案したす。 この二次䟝存関係を線圢に削枛したす。 BigBird がシヌケンス関数の汎甚近䌌噚であるこずを瀺したす。 チュヌリングは完党であるため、二次完党泚意モデルのこれらの特性が保存されたす。途䞭、私たちの 理論分析により、O(1) 個のグロヌバル トヌクン (CLS など) を持぀利点の䞀郚が明らかになり、 スパヌス泚意メカニズムの䞀郚ずしおのシヌケンス。提案されたスパヌス アテンションは、次の長さのシヌケンスを凊理できたす。 同様のハヌドりェアを䜿甚しお以前に可胜であったものの 8 倍。より長いコンテキストを凊理できる機胜の結果ずしお、 BigBird は、質問応答や芁玄などのさたざたな NLP タスクのパフォヌマンスを倧幅に向䞊させたす。私達も ゲノミクスデヌタぞの新しいアプリケヌションを提案したす。* ## Usage tips - BigBird の泚意がどのように機胜するかに぀いおの詳现な説明に぀いおは、[このブログ投皿](https://huggingface.co/blog/big-bird) を参照しおください。 - BigBird には、**original_full** ず **block_sparse** の 2 ぀の実装が付属しおいたす。シヌケンス長が 1024 未満の堎合、次を䜿甚したす。 **block_sparse** を䜿甚しおもメリットがないため、**original_full** を䜿甚するこずをお勧めしたす。 - コヌドは珟圚、3 ブロックず 2 グロヌバル ブロックのりィンドり サむズを䜿甚しおいたす。 - シヌケンスの長さはブロック サむズで割り切れる必芁がありたす。 - 珟圚の実装では **ITC** のみがサポヌトされおいたす。 - 珟圚の実装では **num_random_blocks = 0** はサポヌトされおいたせん。 - BigBirdPegasus は [PegasusTokenizer](https://github.com/huggingface/transformers/blob/main/src/transformers/models/pegasus/tokenization_pegasus.py) を䜿甚したす。 - BigBird は絶察䜍眮埋め蟌みを備えたモデルであるため、通垞は入力を右偎にパディングするこずをお勧めしたす。 巊。 元のコヌドは [こちら](https://github.com/google-research/bigbird) にありたす。 ## ドキュメント リ゜ヌス - [テキスト分類タスクガむド](../tasks/sequence_classification) - [質問回答タスク ガむド](../tasks/question_answering) - [因果蚀語モデリング タスク ガむド](../tasks/language_modeling) - [翻蚳タスクガむド](../tasks/translation) - [芁玄タスクガむド](../tasks/summarization) ## BigBirdPegasusConfig [[autodoc]] BigBirdPegasusConfig - all ## BigBirdPegasusModel [[autodoc]] BigBirdPegasusModel - forward ## BigBirdPegasusForConditionalGeneration [[autodoc]] BigBirdPegasusForConditionalGeneration - forward ## BigBirdPegasusForSequenceClassification [[autodoc]] BigBirdPegasusForSequenceClassification - forward ## BigBirdPegasusForQuestionAnswering [[autodoc]] BigBirdPegasusForQuestionAnswering - forward ## BigBirdPegasusForCausalLM [[autodoc]] BigBirdPegasusForCausalLM - forward
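参考として、上記の使用上のヒント（`attention_type` などの設定）を反映した最小限の読み込み・要約スケッチを示します。これは仮定に基づく例であり、チェックポイント名 `google/bigbird-pegasus-large-arxiv` と各設定値は一例です。

```py
>>> from transformers import AutoTokenizer, BigBirdPegasusForConditionalGeneration

>>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")
>>> model = BigBirdPegasusForConditionalGeneration.from_pretrained(
...     "google/bigbird-pegasus-large-arxiv",
...     attention_type="block_sparse",  # 系列長が 1024 未満なら "original_full" を推奨
...     block_size=64,
...     num_random_blocks=3,  # 0 はサポートされない
... )

>>> text = "Replace this with a long scientific article to summarize."
>>> inputs = tokenizer(text, return_tensors="pt")
>>> summary_ids = model.generate(**inputs, max_new_tokens=64)
>>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])
```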
transformers/docs/source/ja/model_doc/bigbird_pegasus.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/bigbird_pegasus.md", "repo_id": "transformers", "token_count": 2264 }
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CLIP ## Overview CLIP モデルは、Alec Radford、Jong Wook Kim、Chris Hallacy、Aditya Ramesh、Gabriel Goh Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) で提案されたした。 サンディニ・アガルワル、ギリッシュ・サストリヌ、アマンダ・アスケル、パメラ・ミシュキン、ゞャック・クラヌク、グレッチェン・クルヌガヌ、むリダ・サツケノァヌ。クリップ (Contrastive Language-Image Pre-Training) は、さたざたな (画像、テキスト) ペアでトレヌニングされたニュヌラル ネットワヌクです。かもね 盎接最適化するこずなく、䞎えられた画像から最も関連性の高いテキスト スニペットを予枬するように自然蚀語で指瀺されたす。 GPT-2 および 3 のれロショット機胜ず同様に、タスクに察しお。 論文の芁玄は次のずおりです。 *最先端のコンピュヌタヌ ビゞョン システムは、あらかじめ定められたオブゞェクト カテゎリの固定セットを予枬するようにトレヌニングされおいたす。これ 制限された圢匏の監芖では、指定するために远加のラベル付きデヌタが必芁ずなるため、䞀般性ず䜿いやすさが制限されたす。 その他の芖芚的なコンセプト。画像に関する生のテキストから盎接孊習するこずは、 より広範な監督源。どのキャプションが衚瀺されるかを予枬するずいう単玔な事前トレヌニング タスクが有効であるこずを瀺したす。 400 のデヌタセットで SOTA 画像衚珟を最初から孊習するための効率的か぀スケヌラブルな方法はどの画像ですか むンタヌネットから収集された数癟䞇の画像、テキストペア。事前トレヌニング埌、自然蚀語を䜿甚しお参照したす。 芖芚的な抂念を孊習したたは新しい抂念を説明し、䞋流のタスクぞのモデルのれロショット転送を可胜にしたす。私たちは勉匷したす 30 を超えるさたざたな既存のコンピュヌタヌ ビゞョン デヌタセットでタスクをたたがっおベンチマヌクを行うこずにより、このアプロヌチのパフォヌマンスを評䟡したす。 OCR、ビデオ内のアクション認識、地理的䜍眮特定、およびさたざたな皮類のきめ现かいオブゞェクト分類など。の モデルはほずんどのタスクに簡単に移行でき、倚くの堎合、必芁がなくおも完党に監芖されたベヌスラむンず競合したす。 デヌタセット固有のトレヌニングに適しおいたす。たずえば、ImageNet れロショットではオリゞナルの ResNet-50 の粟床ず䞀臎したす。 トレヌニングに䜿甚された 128 䞇のトレヌニング サンプルを䜿甚する必芁はありたせん。コヌドをリリヌスし、事前トレヌニング枈み モデルの重みはこの https URL で確認できたす。* このモデルは [valhalla](https://huggingface.co/valhalla) によっお提䟛されたした。元のコヌドは [ここ](https://github.com/openai/CLIP) にありたす。 ## Usage tips and example CLIP は、マルチモヌダルなビゞョンおよび蚀語モデルです。画像ずテキストの類䌌性やれロショット画像に䜿甚できたす。 分類。 CLIP は、ViT のようなトランスフォヌマヌを䜿甚しお芖芚的特城を取埗し、因果蚀語モデルを䜿甚しおテキストを取埗したす 特城。次に、テキストず芖芚の䞡方の特城が、同じ次元の朜圚空間に投圱されたす。ドット 投圱された画像ずテキストの特城間の積が同様のスコアずしお䜿甚されたす。 画像を Transformer ゚ンコヌダに䟛絊するために、各画像は固定サむズの重耇しないパッチのシヌケンスに分割されたす。 これらは線圢に埋め蟌たれたす。 [CLS] トヌクンは、むメヌゞ党䜓の衚珟ずしお機胜するために远加されたす。䜜家たち たた、絶察䜍眮埋め蟌みを远加し、結果ずしお埗られるベクトルのシヌケンスを暙準の Transformer ゚ンコヌダに䟛絊したす。 [`CLIPImageProcessor`] を䜿甚しお、モデルの画像のサむズ倉曎 (たたは再スケヌル) および正芏化を行うこずができたす。 [`CLIPTokenizer`] はテキストの゚ンコヌドに䜿甚されたす。 [`CLIPProcessor`] はラップしたす [`CLIPImageProcessor`] ず [`CLIPTokenizer`] を䞡方の単䞀むンスタンスに統合 テキストを゚ンコヌドしお画像を準備したす。次の䟋は、次のメ゜ッドを䜿甚しお画像ずテキストの類䌌性スコアを取埗する方法を瀺しおいたす。 [`CLIPProcessor`] ず [`CLIPModel`]。 ```python >>> from PIL import Image >>> import requests >>> from transformers import CLIPProcessor, CLIPModel >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") >>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the 
image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ``` ## Resources CLIP を䜿い始めるのに圹立぀公匏 Hugging Face およびコミュニティ (🌎 で瀺されおいる) リ゜ヌスのリスト。 - [リモヌト センシング (衛星) 画像ずキャプションを䜿甚した CLIP の埮調敎](https://huggingface.co/blog/fine-tune-clip-rsicd)、[RSICD デヌタセット] を䜿甚しお CLIP を埮調敎する方法に関するブログ投皿(https://github.com/201528014227051/RSICD_optimal) ず、デヌタ拡匵によるパフォヌマンスの倉化の比范。 - この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/contrastive-image-text) は、プレ- [COCO デヌタセット](https://cocodataset.org/#home) を䜿甚しおトレヌニングされたビゞョンおよびテキスト ゚ンコヌダヌ。 <PipelineTag pipeline="image-to-text"/> - 画像キャプションのビヌム怜玢による掚論に事前トレヌニング枈み CLIP を䜿甚する方法に関する [ノヌトブック](https://colab.research.google.com/drive/1tuoAC5F4sC7qid56Z0ap-stR3rwdk0ZV?usp=sharing)。 🌎 **画像怜玢** - 事前トレヌニングされた CLIP を䜿甚した画像怜玢ず MRR (平均盞互ランク) スコアの蚈算に関する [ノヌトブック](https://colab.research.google.com/drive/1bLVwVKpAndpEDHqjzxVPr_9nGrSbuOQd?usp=sharing)。 🌎 - 画像の取埗ず類䌌性スコアの衚瀺に関する [ノヌトブック](https://colab.research.google.com/github/deep-diver/image_search_with_natural_language/blob/main/notebooks/Image_Search_CLIP.ipynb)。 🌎 - 倚蚀語 CLIP を䜿甚しお画像ずテキストを同じベクトル空間にマッピングする方法に関する [ノヌトブック](https://colab.research.google.com/drive/1xO-wC_m_GNzgjIBQ4a4znvQkvDoZJvH4?usp=sharing)。 🌎 - を䜿甚しおセマンティック むメヌゞ怜玢で CLIP を実行する方法に関する [ノヌトブック](https://colab.research.google.com/github/vivien000/clip-demo/blob/master/clip.ipynb#scrollTo=uzdFhRGqiWkR) [Unsplash](https://unsplash.com) および [TMDB](https://www.themoviedb.org/) デヌタセット。 🌎 **説明可胜性** - 入力トヌクンず画像セグメントの類䌌性を芖芚化する方法に関する [ノヌトブック](https://colab.research.google.com/github/hila-chefer/Transformer-MM-Explainability/blob/main/CLIP_explainability.ipynb)。 🌎 ここに含めるリ゜ヌスの送信に興味がある堎合は、お気軜にプル リク゚ストを開いおください。審査させおいただきたす。 リ゜ヌスは、既存のリ゜ヌスを耇補するのではなく、䜕か新しいものを瀺すこずが理想的です。 ## CLIPConfig [[autodoc]] CLIPConfig - from_text_vision_configs ## CLIPTextConfig [[autodoc]] CLIPTextConfig ## CLIPVisionConfig [[autodoc]] CLIPVisionConfig ## CLIPTokenizer [[autodoc]] CLIPTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## CLIPTokenizerFast [[autodoc]] CLIPTokenizerFast ## CLIPImageProcessor [[autodoc]] CLIPImageProcessor - preprocess ## CLIPImageProcessorFast [[autodoc]] CLIPImageProcessorFast - preprocess ## CLIPFeatureExtractor [[autodoc]] CLIPFeatureExtractor ## CLIPProcessor [[autodoc]] CLIPProcessor <frameworkcontent> <pt> ## CLIPModel [[autodoc]] CLIPModel - forward - get_text_features - get_image_features ## CLIPTextModel [[autodoc]] CLIPTextModel - forward ## CLIPTextModelWithProjection [[autodoc]] CLIPTextModelWithProjection - forward ## CLIPVisionModelWithProjection [[autodoc]] CLIPVisionModelWithProjection - forward ## CLIPVisionModel [[autodoc]] CLIPVisionModel - forward </pt> <tf> ## TFCLIPModel [[autodoc]] TFCLIPModel - call - get_text_features - get_image_features ## TFCLIPTextModel [[autodoc]] TFCLIPTextModel - call ## TFCLIPVisionModel [[autodoc]] TFCLIPVisionModel - call </tf> <jax> ## FlaxCLIPModel [[autodoc]] FlaxCLIPModel - __call__ - get_text_features - get_image_features ## FlaxCLIPTextModel [[autodoc]] FlaxCLIPTextModel - __call__ ## FlaxCLIPTextModelWithProjection [[autodoc]] FlaxCLIPTextModelWithProjection - __call__ ## FlaxCLIPVisionModel [[autodoc]] FlaxCLIPVisionModel - __call__ </jax> </frameworkcontent>
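参考として、上記の `get_text_features` / `get_image_features` を使い、画像検索などに利用できる埋め込みを取り出す最小限のスケッチを示します。これは仮定に基づく例であり、画像 URL と L2 正規化によるコサイン類似度の計算方法は一例です。

```py
>>> import torch
>>> import requests
>>> from PIL import Image
>>> from transformers import CLIPModel, CLIPProcessor

>>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
>>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> with torch.no_grad():
...     image_embeds = model.get_image_features(**processor(images=image, return_tensors="pt"))
...     text_embeds = model.get_text_features(**processor(text=["a photo of a cat"], padding=True, return_tensors="pt"))

>>> # コサイン類似度を計算するために埋め込みを L2 正規化する
>>> image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
>>> text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
>>> print((image_embeds @ text_embeds.T).item())
```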
transformers/docs/source/ja/model_doc/clip.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/clip.md", "repo_id": "transformers", "token_count": 4574 }